diff --git a/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql b/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql deleted file mode 100644 index ca1bcdc73..000000000 --- a/etc/db/storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql +++ /dev/null @@ -1,27 +0,0 @@ --- --- Update StoRM tape recall database from 1.0.0 to 1.1.0 --- - -DELETE FROM storm_be_ISAM.db_version; -INSERT INTO storm_be_ISAM.db_version (major,minor,revision,description) VALUES (1,1,0,'27 May 2011'); - -DROP TABLE IF EXISTS storm_be_ISAM.storage_file; - -ALTER TABLE storm_be_ISAM.tape_recall - ADD `groupTaskId` CHAR(36) NOT NULL, - ADD `inProgressTime` datetime, - ADD `finalStatusTime` datetime, - ADD INDEX groupTaskId_index (groupTaskId); - -ALTER TABLE storm_be_ISAM.storage_space - ADD `USED_SIZE` bigint(20) NOT NULL default '-1', - ADD `BUSY_SIZE` bigint(20) NOT NULL default '-1', - ADD `UNAVAILABLE_SIZE` bigint(20) NOT NULL default '-1', - ADD `AVAILABLE_SIZE` bigint(20) NOT NULL default '-1', - ADD `RESERVED_SIZE` bigint(20) NOT NULL default '-1', - ADD `UPDATE_TIME` TIMESTAMP NOT NULL default '1970-01-02 00:00:00', - MODIFY COLUMN `CREATED` TIMESTAMP NOT NULL default CURRENT_TIMESTAMP, - ADD INDEX ALIAS_index (ALIAS), - ADD INDEX TOKEN_index (SPACE_TOKEN); - - diff --git a/etc/db/storm_database_config.sh b/etc/db/storm_database_config.sh deleted file mode 100644 index ee4722aa1..000000000 --- a/etc/db/storm_database_config.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash -# -# The following environment variables are used by this script and can be set outside: -# STORM_MYSQL_HOSTNAME (hostname.domain which runs mysql) -# STORM_DBSCRIPT_DIR (directory containing the StoRM scripts for the StoRM DB) -# MYSQL_PASSWORD (mysql root password) - - -################################ Set environment variables #################### -STORM_DB_NAME=storm_db -if [ -z "$STORM_MYSQL_HOSTNAME" ]; then - STORM_MYSQL_HOSTNAME=`hostname` - # extract the short name (i.e. 
stop at the first dot) - STORM_MYSQL_HOSTNAME_SHORT=`expr "$STORM_MYSQL_HOSTNAME" : '\([^.]*\)'` -fi - -if [ -z "$STORM_DBSCRIPT_DIR" ]; then - STORM_DBSCRIPT_DIR=/etc/storm/backend-server/db -fi - -if [ -z "$MYSQL_PASSWORD" ]; then - MYSQL_PASSWORD=storm -fi - - -############################### Function definition ########################### -function get_stormdb_version () { - local MYSQL_OPTS="-h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} " - local STORMDB_VERSION_MAJOR=`mysql $MYSQL_OPTS -s -e"use storm_db;select major from db_version;"` - local STORMDB_VERSION_MINOR=`mysql $MYSQL_OPTS -s -e"use storm_db;select minor from db_version;"` - local STORMDB_VERSION_REVISION=`mysql $MYSQL_OPTS -s -e"use storm_db;select revision from db_version;"` - STORMDB_VERSION="$STORMDB_VERSION_MAJOR.$STORMDB_VERSION_MINOR.$STORMDB_VERSION_REVISION" -} - -function get_stormbeISAM_version () { - local MYSQL_OPTS="-h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} " - local STORMBEISAM_VERSION_MAJOR=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select major from db_version;"` - local STORMBEISAM_VERSION_MINOR=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select minor from db_version;"` - local STORMBEISAM_VERSION_REVISION=`mysql $MYSQL_OPTS -s -e"use storm_be_ISAM;select revision from db_version;"` - STORMBEISAM_VERSION="$STORMBEISAM_VERSION_MAJOR.$STORMBEISAM_VERSION_MINOR.$STORMBEISAM_VERSION_REVISION" -} - -function set_transition_script_filename () { - if [ -n "$STORMDB_VERSION" ]; then - - tmp=`ls $STORM_DBSCRIPT_DIR/storm_mysql_update_from_${STORMDB_VERSION}* 2>&1` - - if [ $? 
-eq 0 ]; then - TRANSITION_SCRIPT_FILENAME=$tmp - else - TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist - fi - else - TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist - fi -} - -function set_stormbeISAM_transition_script_filename () { - if [ -n "$STORMBEISAM_VERSION" ]; then - - tmp=`ls $STORM_DBSCRIPT_DIR/storm_be_ISAM_mysql_update_from_${STORMBEISAM_VERSION}* 2>&1` - - if [ $? -eq 0 ]; then - TRANSITION_SCRIPT_FILENAME=$tmp - else - TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist - fi - else - TRANSITION_SCRIPT_FILENAME=script_not_found # foo value, just a filename that doesn't exist - fi -} - -function create_new_storm_db () { - echo "Creating new db..." - mysql -u root $MYSQL_PWD_OPTION < $STORM_DBSCRIPT_DIR/storm_mysql_tbl.sql - tmp=`mktemp /tmp/sql.XXXXXX` - - sed s/__HOST__/${STORM_MYSQL_HOSTNAME_SHORT}/g $STORM_DBSCRIPT_DIR/storm_mysql_grant.sql | \ - sed s/__STORMUSER__/${STORM_DB_USER}/g | \ - sed s/__HOSTDOMAIN__/${STORM_MYSQL_HOSTNAME}/g > $tmp - - mysql -u root $MYSQL_PWD_OPTION < $tmp - rm -f $tmp - echo "Created new DB" -} - -function update_storm_db () { - get_stormdb_version - set_transition_script_filename - while [ "$TRANSITION_SCRIPT_FILENAME" != script_not_found ] - do - if [ -e "$TRANSITION_SCRIPT_FILENAME" ]; then - mysql -u root $MYSQL_PWD_OPTION < $TRANSITION_SCRIPT_FILENAME - fi - get_stormdb_version - set_transition_script_filename - # After running the script the DB version should be changed, if not then - # there is nothing else to do and the DB is up to date. - done - echo "Update done!" 
-} - -function update_storm_be_ISAM () { - get_stormbeISAM_version - set_stormbeISAM_transition_script_filename - while [ "$TRANSITION_SCRIPT_FILENAME" != script_not_found ] - do - if [ -e "$TRANSITION_SCRIPT_FILENAME" ]; then - mysql -u root $MYSQL_PWD_OPTION < $TRANSITION_SCRIPT_FILENAME - fi - get_stormbeISAM_version - set_stormbeISAM_transition_script_filename - # After running the script the DB version should be changed, if not then - # there is nothing else to do and the DB is up to date. - done - echo "Update done!" -} - -################################## Main ####################################### -# check for the existence of mysql -which mysql > /dev/null 2> /dev/null -if [ "$?" -ne 0 ] # check "which" exit status -then - echo "Error: mysql not found (install mysql or add it to the PATH environment variable)." - exit 1 -fi - -#echo "*** WARNING: When you are asked for a password, it's the 'root' MySQL user password. ***" -# check if mysql need a root password -mysql -u root -e ";" 2>/dev/null -if [ "$?" -ne 0 ]; then # the exit status is not zero - MYSQL_PWD_OPTION="-p$MYSQL_PASSWORD"; -else # the exit status is zero, i.e. no passwd - MYSQL_PWD_OPTION="" -fi - -# check that the storm database exists -mysql -h $STORM_MYSQL_HOSTNAME -u root ${MYSQL_PWD_OPTION} -e"use ${STORM_DB_NAME};" > /dev/null 2> /dev/null -if [ "$?" -ne 0 ]; then - create_new_storm_db -else - update_storm_db - update_storm_be_ISAM -fi - -exit 0 - diff --git a/etc/db/storm_mysql_grant.sql b/etc/db/storm_mysql_grant.sql deleted file mode 100644 index bfad075f5..000000000 --- a/etc/db/storm_mysql_grant.sql +++ /dev/null @@ -1,28 +0,0 @@ ---################################################### ---# Copyright (c) 2008 on behalf of the INFN CNAF ---# The Italian National Institute for Nuclear Physics (INFN), ---# All rights reserved. 
---# ---# createrole sql script for a database ---# ---# author: luca.magnoni@cnaf.infn.it ---# contributes: flavia.donno@cern.ch ---# changelog: Added grant permission on storm_be_ISAM database. ---# ---# ---################################################### -USE mysql; -GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__ IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'localhost' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'__HOST__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_db.* TO __STORMUSER__@'__HOSTDOMAIN__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; - -GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__ IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'localhost' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'__HOST__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; -GRANT ALL PRIVILEGES ON storm_be_ISAM.* TO __STORMUSER__@'__HOSTDOMAIN__' IDENTIFIED BY '__STORMUSER__' WITH GRANT OPTION; - - -FLUSH PRIVILEGES; - - diff --git a/etc/db/storm_mysql_tbl.sql b/etc/db/storm_mysql_tbl.sql deleted file mode 100644 index 15d237862..000000000 --- a/etc/db/storm_mysql_tbl.sql +++ /dev/null @@ -1,461 +0,0 @@ ---################################################### ---# ---# Copyright (c) 2008 on behalf of the INFN CNAF ---# The Italian National Institute for Nuclear Physics (INFN), ---# All rights reserved. ---# ---# create StoRM databases ---# ---# author: luca.magnoni@cnaf.infn.it ---# changelog: Add "ON DELETE CASCADE" for requestDirOption. 
---# ---################################################### - -CREATE DATABASE IF NOT EXISTS storm_db; - -USE storm_db; - -CREATE TABLE IF NOT EXISTS db_version ( - ID int NOT NULL auto_increment, - major int, - minor int, - revision int, - description VARCHAR(100), - primary key (ID) -) engine=InnoDB; - -DELETE FROM storm_db.db_version; -INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,2,'10 Mar 2015'); - -CREATE TABLE IF NOT EXISTS request_queue ( - ID int not null auto_increment, - config_FileStorageTypeID CHAR(1), - config_AccessPatternID CHAR(1), - config_ConnectionTypeID CHAR(1), - config_OverwriteID CHAR(1), - config_RequestTypeID VARCHAR(3) not null, - client_dn VARCHAR(255) BINARY, - u_token VARCHAR(255) BINARY, - retrytime int, - pinLifetime int, - s_token VARCHAR(255) BINARY, - status int not null, - errstring VARCHAR(255), - r_token VARCHAR(255) BINARY, - remainingTotalTime int NOT NULL DEFAULT -1, - fileLifetime int, - nbreqfiles int, - numOfCompleted int, - numOfWaiting int, - numOfFailed int, - timeStamp datetime not null, - proxy blob, - deferredStartTime int, - remainingDeferredStartTime int, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_Get ( - ID int not null auto_increment, - request_DirOptionID int, - request_queueID int, - sourceSURL text not null, - normalized_sourceSURL_StFN text, - sourceSURL_uniqueID int, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS status_Get ( - ID int not null auto_increment, - statusCode int not null, - explanation VARCHAR(255), - fileSize bigint, - estimatedWaitTime int, - remainingPinTime int, - transferURL text, - request_GetID int not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_Put ( - ID int not null auto_increment, - request_queueID int not null, - targetSURL text not null, - expectedFileSize bigint, - normalized_targetSURL_StFN text, - targetSURL_uniqueID int, - primary key (ID)) engine=InnoDB; - 
-CREATE TABLE IF NOT EXISTS status_Put ( - ID int not null auto_increment, - statusCode int not null, - explanation VARCHAR(255), - fileSize bigint, - estimatedWaitTime int, - remainingPinTime int, - remainingFileTime int, - transferURL text, - request_PutID int not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_BoL ( - ID int not null auto_increment, - sourceSURL text not null, - request_DirOptionID int, - request_queueID int, - normalized_sourceSURL_StFN text, - sourceSURL_uniqueID int, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS status_BoL ( - ID int not null auto_increment, - request_BoLID int, - statusCode int not null, - explanation VARCHAR(255), - fileSize bigint, - estimatedWaitTime int, - remainingPinTime int, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_Copy ( - ID int not null auto_increment, - request_queueID int, - request_DirOptionID int, - sourceSURL text not null, - targetSURL text not null, - normalized_sourceSURL_StFN text, - sourceSURL_uniqueID int, - normalized_targetSURL_StFN text, - targetSURL_uniqueID int, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS status_Copy ( - ID int not null auto_increment, - statusCode int not null, - explanation VARCHAR(255), - fileSize bigint, - estimatedWaitTime int, - remainingFileTime int, - request_CopyID int not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_ExtraInfo ( - ID int not null auto_increment, - request_queueID int, - status_GetID int, - request_queueID2 int, - status_PutID int, - ei_key VARCHAR(255) not null, - ei_value VARCHAR(255), - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_RetentionPolicyInfo ( - ID int not null auto_increment, - request_queueID int not null, - config_RetentionPolicyID CHAR(1), - config_AccessLatencyID CHAR(1), - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_ClientNetworks ( - ID int not null 
auto_increment, - network VARCHAR(255) not null, - request_queueID int, primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_TransferProtocols ( - ID int not null auto_increment, - request_queueID int, - config_ProtocolsID VARCHAR(30), - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_DirOption ( - ID int not null auto_increment, - isSourceADirectory tinyint(1) default 0 not null, - allLevelRecursive tinyint(1) default 0, - numOfLevels int default 1, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS request_VOMSAttributes ( - ID int not null auto_increment, - request_queueID int, - vo VARCHAR(255) not null, - voms_group text, - voms_role text, - voms_capability text, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS volatile ( - ID int not null auto_increment, - file text not null, - start datetime not null, - fileLifetime int not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS jit ( - ID int not null auto_increment, - file text not null, - acl int not null, - uid int not null, - start datetime not null, - pinLifetime int not null, - gid int not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_Protocols ( - ID VARCHAR(30) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_RetentionPolicy ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_AccessLatency ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_FileStorageType ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_AccessPattern ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_ConnectionType ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT EXISTS config_Overwrite ( - ID CHAR(1) not null, - primary key (ID)) engine=InnoDB; - -CREATE TABLE IF NOT 
EXISTS config_RequestType ( - ID VARCHAR(3) not null, - primary key (ID)) engine=InnoDB; - - - -ALTER TABLE request_queue - add index FK_request_qu_2651 (config_FileStorageTypeID), - add constraint FK_request_qu_2651 foreign key (config_FileStorageTypeID) references config_FileStorageType (ID); - -ALTER TABLE request_queue - add index FK_request_qu_4029 (config_AccessPatternID), - add constraint FK_request_qu_4029 foreign key (config_AccessPatternID) references config_AccessPattern (ID); - -ALTER TABLE request_queue - add index FK_request_qu_8833 (config_ConnectionTypeID), - add constraint FK_request_qu_8833 foreign key (config_ConnectionTypeID) references config_ConnectionType (ID); - -ALTER TABLE request_queue - add index FK_request_qu_8815 (config_OverwriteID), - add constraint FK_request_qu_8815 foreign key (config_OverwriteID) references config_Overwrite (ID); - -ALTER TABLE request_queue - add index FK_request_qu_375 (config_RequestTypeID), - add constraint FK_request_qu_375 foreign key (config_RequestTypeID) references config_RequestType (ID); - -CREATE INDEX r_token_index ON request_queue (r_token(8)); -CREATE INDEX status_index on request_queue (status); - -ALTER TABLE request_Get - add index FK_request_Ge_9630 (request_DirOptionID), - add constraint FK_request_Ge_9630 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE; - -ALTER TABLE request_Get - add index FK_request_Ge_3811 (request_queueID), - add constraint FK_request_Ge_3811 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -CREATE INDEX index_sourceSURL_uniqueID on request_Get (sourceSURL_uniqueID); - -ALTER TABLE status_Get - add index FK_status_Get_4853 (request_GetID), - add constraint FK_status_Get_4853 foreign key (request_GetID) references request_Get (ID) ON DELETE CASCADE; - -ALTER TABLE request_Put - add index FK_request_Pu_4665 (request_queueID), - add constraint FK_request_Pu_4665 foreign key (request_queueID) references 
request_queue (ID) ON DELETE CASCADE; - -CREATE INDEX index_targetSURL on request_Put (targetSURL(255)); -CREATE INDEX index_targetSURL_uniqueID on request_Put (targetSURL_uniqueID); - -ALTER TABLE status_Put - add index FK_status_Put_3223 (request_PutID), - add constraint FK_status_Put_3223 foreign key (request_PutID) references request_Put (ID) ON DELETE CASCADE; - -CREATE INDEX statusCode_index on status_Put (statusCode); -CREATE INDEX statusCodeGet_index on status_Get (statusCode); -CREATE INDEX transferURL_index ON status_Put (transferURL(255)); - -ALTER TABLE request_BoL - add index FK_request_Bo_4166 (request_DirOptionID), - add constraint FK_request_Bo_4166 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE; - -ALTER TABLE request_BoL - add index FK_request_Bo_8346 (request_queueID), - add constraint FK_request_Bo_8346 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -CREATE INDEX index_sourceSURL_uniqueID on request_BoL (sourceSURL_uniqueID); - -ALTER TABLE status_BoL - add index FK_status_BoL_1747 (request_BoLID), - add constraint FK_status_BoL_1747 foreign key (request_BoLID) references request_BoL (ID) ON DELETE CASCADE; - -ALTER TABLE request_Copy - add index FK_request_Co_6810 (request_queueID), - add constraint FK_request_Co_6810 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_Copy - add index FK_request_Co_2630 (request_DirOptionID), - add constraint FK_request_Co_2630 foreign key (request_DirOptionID) references request_DirOption (ID) ON DELETE CASCADE; - -CREATE INDEX index_sourceSURL_uniqueID on request_Copy (sourceSURL_uniqueID); -CREATE INDEX index_targetSURL_uniqueID on request_Copy (targetSURL_uniqueID); - -ALTER TABLE status_Copy - add index FK_status_Cop_447 (request_CopyID), - add constraint FK_status_Cop_447 foreign key (request_CopyID) references request_Copy (ID) ON DELETE CASCADE; - -ALTER TABLE request_ExtraInfo - 
add index FK_request_Ex_2570 (request_queueID), - add constraint FK_request_Ex_2570 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_ExtraInfo - add index FK_request_Ex_9422 (status_GetID), - add constraint FK_request_Ex_9422 foreign key (status_GetID) references status_Get (ID) ON DELETE CASCADE; - -ALTER TABLE request_ExtraInfo - add index FK_request_Ex_9425 (request_queueID2), - add constraint FK_request_Ex_9425 foreign key (request_queueID2) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_ExtraInfo - add index FK_request_Ex_8646 (status_PutID), - add constraint FK_request_Ex_8646 foreign key (status_PutID) references status_Put (ID) ON DELETE CASCADE; - -ALTER TABLE request_RetentionPolicyInfo - add index FK_request_Re_5291 (request_queueID), - add constraint FK_request_Re_5291 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_RetentionPolicyInfo - add index FK_request_Re_503 (config_RetentionPolicyID), - add constraint FK_request_Re_503 foreign key (config_RetentionPolicyID) references config_RetentionPolicy (ID); - -ALTER TABLE request_RetentionPolicyInfo - add index FK_request_Re_2860 (config_AccessLatencyID), - add constraint FK_request_Re_2860 foreign key (config_AccessLatencyID) references config_AccessLatency (ID); - -ALTER TABLE request_ClientNetworks - add index FK_request_Cl_4686 (request_queueID), - add constraint FK_request_Cl_4686 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_TransferProtocols - add index FK_request_Tr_6848 (request_queueID), - add constraint FK_request_Tr_6848 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -ALTER TABLE request_TransferProtocols - add index FK_request_Tr_8127 (config_ProtocolsID), - add constraint FK_request_Tr_8127 foreign key (config_ProtocolsID) references config_Protocols (ID); - -ALTER TABLE 
request_VOMSAttributes - add index FK_request_VO_5290 (request_queueID), - add constraint FK_request_VO_5290 foreign key (request_queueID) references request_queue (ID) ON DELETE CASCADE; - -CREATE INDEX file_index ON volatile (file(255)); - -REPLACE INTO config_Protocols (ID) VALUES ('file'); -REPLACE INTO config_Protocols (ID) VALUES ('gsiftp'); -REPLACE INTO config_Protocols (ID) VALUES ('rfio'); -REPLACE INTO config_Protocols (ID) VALUES ('root'); -REPLACE INTO config_Protocols (ID) VALUES ('http'); -REPLACE INTO config_Protocols (ID) VALUES ('https'); -REPLACE INTO config_Protocols (ID) VALUES ('xroot'); - -REPLACE INTO config_Overwrite (ID) VALUES ('N'); -REPLACE INTO config_Overwrite (ID) VALUES ('A'); -REPLACE INTO config_Overwrite (ID) VALUES ('D'); - -REPLACE INTO config_FileStorageType (ID) VALUES ('V'); -REPLACE INTO config_FileStorageType (ID) VALUES ('P'); -REPLACE INTO config_FileStorageType (ID) VALUES ('D'); - -REPLACE INTO config_RequestType (ID) VALUES ('BOL'); -REPLACE INTO config_RequestType (ID) VALUES ('PTG'); -REPLACE INTO config_RequestType (ID) VALUES ('PTP'); -REPLACE INTO config_RequestType (ID) VALUES ('COP'); - -REPLACE INTO config_RetentionPolicy (ID) VALUES ('R'); -REPLACE INTO config_RetentionPolicy (ID) VALUES ('C'); -REPLACE INTO config_RetentionPolicy (ID) VALUES ('O'); - -REPLACE INTO config_AccessLatency (ID) VALUES ('O'); -REPLACE INTO config_AccessLatency (ID) VALUES ('N'); - --- --- StoRM Backend DATABASE --- storm_be_ISAM --- - -CREATE DATABASE IF NOT EXISTS storm_be_ISAM; -USE storm_be_ISAM; - -CREATE TABLE IF NOT EXISTS db_version ( - ID int NOT NULL auto_increment, - major int, - minor int, - revision int, - description VARCHAR(100), - primary key (ID) -) ENGINE=MyISAM DEFAULT CHARSET=latin1; - -DELETE FROM storm_be_ISAM.db_version; -INSERT INTO storm_be_ISAM.db_version (major,minor,revision,description) VALUES (1,1,0,'27 May 2011'); - --- --- Table structure for table `storage_space` --- -CREATE TABLE IF NOT EXISTS 
`storage_space` ( - `SS_ID` bigint(20) NOT NULL auto_increment, - `USERDN` VARCHAR(150) NOT NULL default '', - `VOGROUP` VARCHAR(20) NOT NULL default '', - `ALIAS` VARCHAR(100) default NULL, - `SPACE_TOKEN` VARCHAR(100) BINARY NOT NULL default '', - `CREATED` TIMESTAMP NOT NULL default CURRENT_TIMESTAMP, - `TOTAL_SIZE` bigint(20) NOT NULL default '0', - `GUAR_SIZE` bigint(20) NOT NULL default '0', - `FREE_SIZE` bigint(20) default NULL default '-1', - `SPACE_FILE` VARCHAR(145) NOT NULL default '', - `STORAGE_INFO` VARCHAR(255) default NULL, - `LIFETIME` bigint(20) default NULL, - `SPACE_TYPE` VARCHAR(10) NOT NULL default '', - `USED_SIZE` bigint(20) NOT NULL default '-1', - `BUSY_SIZE` bigint(20) NOT NULL default '-1', - `UNAVAILABLE_SIZE` bigint(20) NOT NULL default '-1', - `AVAILABLE_SIZE` bigint(20) NOT NULL default '-1', - `RESERVED_SIZE` bigint(20) NOT NULL default '-1', - `UPDATE_TIME` TIMESTAMP NOT NULL default '1970-01-02 00:00:00', - PRIMARY KEY (`SS_ID`), - INDEX ALIAS_index (`ALIAS`), - INDEX TOKEN_index (`SPACE_TOKEN`), - KEY `SPACE_NAME` (`SPACE_TOKEN`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1; - - --- --- Table structure for table `tape_recall` --- -CREATE TABLE IF NOT EXISTS tape_recall ( - taskId CHAR(36) NOT NULL, - requestToken VARCHAR(255) BINARY NOT NULL, - requestType CHAR(4), - fileName text not null, - pinLifetime int, - status int, - voName VARCHAR(255) BINARY, - userID VARCHAR(255) BINARY, - retryAttempt int, - timeStamp datetime not null, - deferredStartTime datetime not null, - groupTaskId CHAR(36) NOT NULL, - inProgressTime datetime, - finalStatusTime datetime, - primary key (taskId , requestToken)) ENGINE=InnoDB; - -ALTER TABLE tape_recall - ADD INDEX deferredStartTime (deferredStartTime), - ADD INDEX groupTaskId_index (groupTaskId); \ No newline at end of file diff --git a/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql b/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql deleted file mode 100644 index de72ab900..000000000 --- 
a/etc/db/storm_mysql_update_from_1.7.0_to_1.7.1.sql +++ /dev/null @@ -1,4 +0,0 @@ -DELETE FROM storm_db.db_version; -INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,1,'27 Jan 2015'); - -INSERT INTO storm_db.config_Protocols VALUES ('xroot'); \ No newline at end of file diff --git a/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql b/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql deleted file mode 100644 index 017115a14..000000000 --- a/etc/db/storm_mysql_update_from_1.7.1_to_1.7.2.sql +++ /dev/null @@ -1,5 +0,0 @@ -DELETE FROM storm_db.db_version; -INSERT INTO storm_db.db_version (major,minor,revision,description) VALUES (1,7,2,'10 Mar 2015'); - -CREATE INDEX statusCodeGet_index on storm_db.status_Get (statusCode); - diff --git a/etc/storm.properties.template b/etc/storm.properties.template index 43bde6974..a4dd5358e 100644 --- a/etc/storm.properties.template +++ b/etc/storm.properties.template @@ -15,7 +15,7 @@ # ============================ # StoRM Service DNS # ============================ -# hostname with which the service is published +# Host with which the SRM service is published on BDII storm.service.FE-public.hostname = @@ -49,10 +49,18 @@ storm.service.SURL.default-ports = # FE/BE communication RDBMS # ============================ # -# Parameters to connect to the DB used as channel for the requests. -storm.service.request-db.host = -storm.service.request-db.username = -storm.service.request-db.passwd = +# Parameters to connect to the DB used as channel for the requests. 
+# Deprecated since v1.12.0: +# storm.service.request-db.host = +# storm.service.request-db.username = +# storm.service.request-db.passwd = +# storm.service.request-db.properties = +# Added v1.12.0 +storm.service.db.host = +storm.service.db.username = +storm.service.db.password = +storm.service.db.port = +storm.service.db.properties = ############################################# ############ PROFILE PARAMETERS ############ @@ -104,8 +112,24 @@ default.storagetype = P # ============================ # BE-private RDBMS # ============================ -persistence.internal-db.connection-pool.maxActive = 50 -persistence.internal-db.connection-pool.maxWait = 50 + +# Removed by 1.12.0 +# persistence.internal-db.connection-pool.maxActive = 50 +# persistence.internal-db.connection-pool.maxWait = 50 +# persistence.internal-db.connection-pool.size = 50 + +# Added with 1.12.0: + +# Sets the maximum permitted lifetime of a connection in milliseconds. A value of zero or less indicates an infinite lifetime. 
+storm.service.db.pool.maxWaitMillis = -1 +storm.service.db.pool.testOnBorrow = true +storm.service.db.pool.testWhileIdle = true + +storm.service.db.pool.stormdb.maxTotal = 500 +storm.service.db.pool.stormdb.minIdle = 50 + +storm.service.db.pool.stormbeisam.maxTotal = 200 +storm.service.db.pool.stormbeisam.minIdle = 10 # ============================ @@ -132,8 +156,8 @@ scheduler.chunksched.copy.queueSize=500 # ============================ # ASYNCH PICKER Component parameters # ============================ -asynch.db.ReconnectPeriod=18000 -asynch.db.DelayPeriod=30 +asynch.db.ReconnectPeriod=18000 # removed since v1.12.0 +asynch.db.DelayPeriod=30 # removed since v1.12.0 asynch.PickingInitialDelay=1 # Polling time in seconds for pick up new requests from DB asynch.PickingTimeInterval=2 @@ -211,7 +235,15 @@ purge.size=800 #Time after that the GC consider a _terminated_ request as garbage #Default: 21600s (6h) expired.request.time=21600 - +# +#Time after that the GC consider an _in-progress_ PtP request as terminated +#Deprecates "expired.inprogress.time" +#Default: 2592000s (720h) +expired.inprogress.ptp.time=2592000 +# +#Time after that the GC consider an _in-progress_ BoL request as terminated +#Default: 2592000s (720h) +expired.inprogress.bol.time=2592000 # ========================================================== # Expired-Put-Requests-Agent parameters @@ -229,6 +261,9 @@ transit.delay = 10 # Skip ACL setup for PtG requests ptg.skip-acl-setup = false +# Skip ACL setup for PtP requests +ptp.skip-acl-setup = false + # The caching policy for successful name lookups from the name service. # The value is specified as integer to indicate the number of seconds to cache the successful lookup. 
diff --git a/pom.xml b/pom.xml index de61913c6..af651a4fb 100644 --- a/pom.xml +++ b/pom.xml @@ -29,7 +29,7 @@ 2.25.1 1.1 8.1.9.v20130131 - 3.3.0 + 5.14.0 20080701 4.13.2 1.2.3 @@ -56,7 +56,7 @@ 11 - 1.0.7 + 2.0.0 UTF-8 @@ -158,7 +158,6 @@ maven-assembly-plugin ${plugin.assembly.version} - storm-backend-server false ${project.build.directory} @@ -307,6 +306,12 @@ ${jnaVersion} + + net.java.dev.jna + jna-platform + ${jnaVersion} + + org.codehaus.jettison jettison @@ -490,4 +495,4 @@ - \ No newline at end of file + diff --git a/src/main/assemblies/assembly.xml b/src/main/assemblies/assembly.xml index 48771a721..69a77b817 100644 --- a/src/main/assemblies/assembly.xml +++ b/src/main/assemblies/assembly.xml @@ -47,19 +47,6 @@ etc/storm/backend-server - - - etc/db - - storm_be_ISAM_mysql_update_from_1.0.0_to_1.1.0.sql - storm_mysql_grant.sql - storm_mysql_tbl.sql - storm_mysql_update_from_1.7.0_to_1.7.1.sql - storm_mysql_update_from_1.7.1_to_1.7.2.sql - - etc/storm/backend-server/db - - src @@ -84,12 +71,6 @@ - - etc/db/storm_database_config.sh - etc/storm/backend-server/db - 0755 - - target/${artifactId}.jar usr/share/java/storm-backend-server diff --git a/src/main/java/it/grid/storm/Main.java b/src/main/java/it/grid/storm/Main.java index 7e636ad5a..f5f439756 100644 --- a/src/main/java/it/grid/storm/Main.java +++ b/src/main/java/it/grid/storm/Main.java @@ -6,20 +6,57 @@ import static java.lang.System.exit; +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.configuration.ConfigurationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.xml.sax.SAXException; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.startup.Bootstrap; import it.grid.storm.startup.BootstrapException; public class Main { private static final Logger log = LoggerFactory.getLogger(Main.class); + 
public static final String DEFAULT_CONFIG_DIR = "/etc/storm/backend-server"; + public static final String DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR + "/storm.properties"; + public static final String DEFAULT_NAMESPACE_FILE = DEFAULT_CONFIG_DIR + "/namespace.xml"; + public static final String DEFAULT_NAMESPACE_SCHEMA_FILE = + DEFAULT_CONFIG_DIR + "/namespace-1.5.0.xsd"; + public static final String DEFAULT_LOGGING_FILE = DEFAULT_CONFIG_DIR + "/logging.xml"; + private Main() {} public static void main(String[] args) { - StoRM storm = new StoRM(); + log.info("Configure logging from {} ...", DEFAULT_LOGGING_FILE); + Bootstrap.configureLogging(DEFAULT_LOGGING_FILE); + + + log.info("Load configuration from {} ...", DEFAULT_CONFIG_FILE); + try { + StormConfiguration.init(DEFAULT_CONFIG_FILE); + } catch (IOException | ConfigurationException e) { + log.error(e.getMessage(), e); + exit(1); + } + + log.info("Load namespace from {} ...", DEFAULT_NAMESPACE_FILE); + try { + Namespace.init(DEFAULT_NAMESPACE_FILE, true); + } catch (RuntimeException | NamespaceException | ConfigurationException | ParserConfigurationException | SAXException | IOException e) { + log.error(e.getMessage(), e); + exit(1); + } + + StoRM storm = new StoRM(StormConfiguration.getInstance(), Namespace.getInstance()); try { storm.init(); diff --git a/src/main/java/it/grid/storm/StoRM.java b/src/main/java/it/grid/storm/StoRM.java index 76d441167..e0238456a 100644 --- a/src/main/java/it/grid/storm/StoRM.java +++ b/src/main/java/it/grid/storm/StoRM.java @@ -19,35 +19,37 @@ import it.grid.storm.asynch.AdvancedPicker; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.catalogs.StoRMDataSource; -import it.grid.storm.catalogs.timertasks.ExpiredPutRequestsAgent; +import it.grid.storm.catalogs.executors.RequestFinalizerService; +import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; import it.grid.storm.check.CheckManager; import it.grid.storm.check.CheckResponse; import 
it.grid.storm.check.CheckStatus; import it.grid.storm.check.SimpleCheckManager; -import it.grid.storm.config.Configuration; -import it.grid.storm.health.HealthDirector; +import it.grid.storm.check.sanity.filesystem.SupportedFSType; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.health.HealthMonitor; import it.grid.storm.info.du.DiskUsageService; import it.grid.storm.metrics.StormMetricsReporter; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceInterface; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Property; +import it.grid.storm.namespace.model.Property.SizeUnitType; +import it.grid.storm.namespace.model.Quota; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.rest.RestServer; +import it.grid.storm.space.SpaceHelper; +import it.grid.storm.space.gpfsquota.GPFSFilesetQuotaInfo; import it.grid.storm.space.gpfsquota.GPFSQuotaManager; +import it.grid.storm.space.gpfsquota.GetGPFSFilesetQuotaInfoCommand; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.startup.Bootstrap; import it.grid.storm.startup.BootstrapException; import it.grid.storm.synchcall.SimpleSynchcallDispatcher; +import it.grid.storm.util.GPFSSizeHelper; import it.grid.storm.xmlrpc.StoRMXmlRpcException; import it.grid.storm.xmlrpc.XMLRPCHttpServer; -/** - * This class represents a StoRM as a whole: it sets the configuration file which contains - * properties necessary for other classes of StoRM, it sets up logging, as well as the advanced - * picker. 
- * - * @author EGRID - ICTP Trieste; INFN - CNAF Bologna @date March 28th, 2005 @version 7.0 - */ - public class StoRM { private static final Logger log = LoggerFactory.getLogger(StoRM.class); @@ -56,54 +58,70 @@ public class StoRM { private XMLRPCHttpServer xmlrpcServer; // Timer object in charge to call periodically the Space Garbage Collector - private final Timer gc = new Timer(); + private final Timer gc; private TimerTask cleaningTask; - private boolean isSpaceGCRunning = false; + private boolean isSpaceGCRunning; /* - * Timer object in charge of transit expired put requests from SRM_SPACE_AVAILABLE to - * SRM_FILE_LIFETIME_EXPIRED and from SRM_REQUEST_INPROGRESS to SRM_FAILURE + * Agent in charge of transit expired ptg/ptp/bol requests to final statuses */ - private final Timer transiter = new Timer(); - private TimerTask expiredAgent; - private boolean isExpiredAgentRunning = false; - - private boolean isDiskUsageServiceEnabled = false; + private RequestFinalizerService expiredAgent; + private boolean isExpiredAgentRunning; + + /* Requests Garbage Collector */ + private final Timer rgc; + private TimerTask rgcTask; + private boolean isRequestGCRunning; + + private boolean isDiskUsageServiceEnabled; private DiskUsageService duService; + private boolean isPickerRunning; + private boolean isXmlrpcServerRunning; + + private boolean isRestServerRunning; + private RestServer restServer; + + private final StormConfiguration config; private final ReservedSpaceCatalog spaceCatalog; + private final Namespace namespace; - private boolean isPickerRunning = false; - private boolean isXmlrpcServerRunning = false; + public StoRM(StormConfiguration config, Namespace namespace) { - private boolean isRestServerRunning = false; - private RestServer restServer; + this.config = config; + this.namespace = namespace; + this.spaceCatalog = ReservedSpaceCatalog.getInstance(); - private final Configuration config; + this.picker = new AdvancedPicker(); + this.isPickerRunning = 
false; - public StoRM() { + this.isXmlrpcServerRunning = false; - config = Configuration.getInstance(); - picker = new AdvancedPicker(); - spaceCatalog = new ReservedSpaceCatalog(); + this.isRestServerRunning = false; + this.gc = new Timer(); + this.isSpaceGCRunning = false; + this.isExpiredAgentRunning = false; + + this.rgc = new Timer(); + this.isRequestGCRunning = false; + + this.isDiskUsageServiceEnabled = false; } public void init() throws BootstrapException { configureIPv6(); - configureLogging(); + handleTotalOnlineSizeFromGPFSQuota(); + + updateSA(); configureSecurity(); configureMetricsReporting(); - configureStoRMDataSource(); - - loadNamespaceConfiguration(); - - HealthDirector.initializeDirector(false); + HealthMonitor.init(); loadPathAuthzDBConfiguration(); @@ -126,11 +144,84 @@ private void configureIPv6() { log.info("java.net.preferIPv6Addresses is {}", System.getProperty("java.net.preferIPv6Addresses")); } - private void configureLogging() { + private void handleTotalOnlineSizeFromGPFSQuota() { + + namespace.getAllDefinedVFS().forEach(storageArea -> { + if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) { + Quota quota = storageArea.getCapabilities().getQuota(); + if (quota != null && quota.getEnabled()) { + + GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea); + if (quotaInfo != null) { + updateTotalOnlineSizeFromGPFSQuota(storageArea, quotaInfo); + } + } + } + }); + } + + private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFS storageArea) { + + GetGPFSFilesetQuotaInfoCommand cmd = new GetGPFSFilesetQuotaInfoCommand(storageArea); + + try { + return cmd.call(); + } catch (Throwable t) { + log.warn( + "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}. 
Reason: {}", + storageArea.getAliasName(), t.getMessage()); + return null; + } + } + + private void updateTotalOnlineSizeFromGPFSQuota(VirtualFS storageArea, + GPFSFilesetQuotaInfo quotaInfo) { + + long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo.getBlockSoftLimit()); + Property newProperties = Property.from(storageArea.getProperties()); + try { + newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), gpfsTotalOnlineSize); + storageArea.setProperties(newProperties); + log.warn("TotalOnlineSize as specified in namespace.xml will be ignored " + + "since quota is enabled on the GPFS {} Storage Area.", storageArea.getAliasName()); + } catch (NamespaceException e) { + log.warn( + "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}.", + storageArea.getAliasName(), e); + } + } + + private void updateSA() { + + SpaceHelper spaceHelp = new SpaceHelper(); + log.debug("Updating Space Catalog with Storage Area defined within NAMESPACE"); + namespace.getAllDefinedVFS().forEach(vfs ->{ + + String vfsAliasName = vfs.getAliasName(); + log.debug(" Considering VFS : {}", vfsAliasName); + String aliasName = vfs.getSpaceTokenDescription(); + if (aliasName == null) { + // Found a VFS without the optional element Space Token Description + log.debug( + "XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. " + + "Skipping the Update of SA", + vfsAliasName); + } else { + TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize(); + String spaceFileName = vfs.getRootPath(); + TSpaceToken spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, spaceFileName); + vfs.setSpaceToken(spaceToken); + + log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", aliasName, + spaceToken, onlineSize, spaceFileName); + } + + }); + spaceHelp.purgeOldVOSA_token(); + log.debug("Updating Space Catalog... 
DONE!!"); - String configurationDir = config.configurationDir(); - String logFile = configurationDir + "logging.xml"; - Bootstrap.configureLogging(logFile); } private void configureSecurity() { @@ -155,12 +246,6 @@ private void configureMetricsReporting() { } - private void loadNamespaceConfiguration() { - - NamespaceDirector.initializeDirector(); - - } - private void loadPathAuthzDBConfiguration() throws BootstrapException { String pathAuthzDBFileName = config.configurationDir() + "path-authz.db"; @@ -214,11 +299,6 @@ private void performSanityChecks() throws BootstrapException { } - private void configureStoRMDataSource() { - - StoRMDataSource.init(); - } - /** * Method used to start the picker. */ @@ -284,11 +364,11 @@ public synchronized void stopXmlRpcServer() { private void configureRestService() { - int restServicePort = Configuration.getInstance().getRestServicesPort(); - boolean isTokenEnabled = Configuration.getInstance().getXmlRpcTokenEnabled(); - String token = Configuration.getInstance().getXmlRpcToken(); - int maxThreads = Configuration.getInstance().getRestServicesMaxThreads(); - int maxQueueSize = Configuration.getInstance().getRestServicesMaxQueueSize(); + int restServicePort = StormConfiguration.getInstance().getRestServicesPort(); + boolean isTokenEnabled = StormConfiguration.getInstance().getXmlRpcTokenEnabled(); + String token = StormConfiguration.getInstance().getXmlRpcToken(); + int maxThreads = StormConfiguration.getInstance().getRestServicesMaxThreads(); + int maxQueueSize = StormConfiguration.getInstance().getRestServicesMaxQueueSize(); restServer = new RestServer(restServicePort, maxThreads, maxQueueSize, isTokenEnabled, token); } @@ -388,14 +468,6 @@ public synchronized boolean spaceGCIsRunning() { return isSpaceGCRunning; } - /** - * Starts the internal timer needed to periodically check and transit requests whose pinLifetime - * has expired and are in SRM_SPACE_AVAILABLE, to SRM_FILE_LIFETIME_EXPIRED. 
Moreover, the - * physical file corresponding to the SURL gets removed; then any JiT entry gets removed, except - * those on traverse for the parent directory; finally any volatile entry gets removed too. This - * internal timer also transit requests whose status is still SRM_REQUEST_INPROGRESS after a - * configured period to SRM_FAILURE. - */ public synchronized void startExpiredAgent() { if (isExpiredAgentRunning) { @@ -403,16 +475,8 @@ public synchronized void startExpiredAgent() { return; } - /* Delay time before starting cleaning thread! Set to 1 minute */ - final long delay = config.getTransitInitialDelay() * 1000L; - /* Period of execution of cleaning! Set to 1 hour */ - final long period = config.getTransitTimeInterval() * 1000L; - /* Expiration time before starting move in-progress requests to failure */ - final long inProgressExpirationTime = config.getInProgressPutRequestExpirationTime(); - log.debug("Starting Expired Agent."); - expiredAgent = new ExpiredPutRequestsAgent(inProgressExpirationTime); - transiter.scheduleAtFixedRate(expiredAgent, delay, period); + expiredAgent = new RequestFinalizerService(config); isExpiredAgentRunning = true; log.debug("Expired Agent started."); } @@ -426,7 +490,7 @@ public synchronized void stopExpiredAgent() { log.debug("Stopping Expired Agent."); if (expiredAgent != null) { - expiredAgent.cancel(); + expiredAgent.stop(); } log.debug("Expired Agent stopped."); isExpiredAgentRunning = false; @@ -441,7 +505,7 @@ private void configureDiskUsageService() { isDiskUsageServiceEnabled = config.getDiskUsageServiceEnabled(); - NamespaceInterface namespace = NamespaceDirector.getNamespace(); + Namespace namespace = Namespace.getInstance(); List quotaEnabledVfs = namespace.getVFSWithQuotaEnabled(); List sas = namespace.getAllDefinedVFS() .stream() @@ -496,6 +560,40 @@ public synchronized void stopDiskUsageService() { } } + public synchronized void startRequestGarbageCollector() { + + if (isRequestGCRunning) { + 
log.debug("Requests Garbage Collector is already running."); + return; + } + + /* Delay time before starting cleaning thread */ + final long delay = config.getCleaningInitialDelay() * 1000L; + /* Period of execution of cleaning */ + final long period = config.getCleaningTimeInterval() * 1000L; + + log.debug("Starting Requests Garbage Collector ."); + rgcTask = new RequestsGarbageCollector(rgc, period); + rgc.schedule(rgcTask, delay); + isRequestGCRunning = true; + log.debug("Requests Garbage Collector started."); + } + + public synchronized void stopRequestGarbageCollector() { + + if (!isRequestGCRunning) { + log.debug("Requests Garbage Collector is not running."); + return; + } + + log.debug("Stopping Requests Garbage Collector."); + if (rgcTask != null) { + rgcTask.cancel(); + } + log.debug("Requests Garbage Collector stopped."); + isRequestGCRunning = false; + } + public void startServices() throws Exception { startPicker(); @@ -503,6 +601,7 @@ public void startServices() throws Exception { startRestServer(); startSpaceGC(); startExpiredAgent(); + startRequestGarbageCollector(); startDiskUsageService(); } @@ -513,6 +612,7 @@ public void stopServices() { stopRestServer(); stopSpaceGC(); stopExpiredAgent(); + stopRequestGarbageCollector(); stopDiskUsageService(); GPFSQuotaManager.INSTANCE.shutdown(); diff --git a/src/main/java/it/grid/storm/acl/AclManager.java b/src/main/java/it/grid/storm/acl/AclManager.java index 42b5a831c..5a1f89e36 100644 --- a/src/main/java/it/grid/storm/acl/AclManager.java +++ b/src/main/java/it/grid/storm/acl/AclManager.java @@ -8,11 +8,6 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.LocalUser; -/** - * @author Michele Dibenedetto - * - */ - public interface AclManager { /** @@ -24,7 +19,7 @@ public interface AclManager { * a not existent file */ FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + 
FilesystemPermission permission); /** * @param localFile an existent file if received null parameters or the LocalFile object refers to @@ -36,7 +31,7 @@ FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUs * a not existent file */ FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -45,8 +40,7 @@ FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUse * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to * a not existent file */ - FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser) - throws IllegalArgumentException; + FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser); /** * @param localFile an existent file @@ -55,8 +49,7 @@ FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localU * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to * a not existent file */ - FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser) - throws IllegalArgumentException; + FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser); /** * @param localFile an existent file @@ -67,7 +60,7 @@ FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUs * a not existent file */ FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -78,7 +71,7 @@ FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localU * a not existent file */ FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUser, - 
FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -89,7 +82,7 @@ FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUs * a not existent file */ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -100,14 +93,14 @@ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser * a not existent file */ FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to * a not existent file */ - void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException; + void removeHttpsPermissions(LocalFile localFile); /** * @param localFile an existent file @@ -117,7 +110,7 @@ FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser, * a not existent file */ void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -137,7 +130,7 @@ void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission * a not existent file */ void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; + FilesystemPermission permission); /** * @param localFile an existent file @@ -146,8 +139,7 @@ void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, * @throws IllegalArgumentException if received null parameters or the LocalFile 
object refers to * a not existent file */ - void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException; + void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission); /** * @param oldLocalFile an existent source file @@ -155,7 +147,6 @@ void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission p * @throws IllegalArgumentException if received null parameters or the LocalFile objects refers to * not existent files */ - void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile) - throws IllegalArgumentException; + void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile); } diff --git a/src/main/java/it/grid/storm/acl/NoAclManager.java b/src/main/java/it/grid/storm/acl/NoAclManager.java new file mode 100644 index 000000000..0351ebfa4 --- /dev/null +++ b/src/main/java/it/grid/storm/acl/NoAclManager.java @@ -0,0 +1,187 @@ +package it.grid.storm.acl; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +import it.grid.storm.filesystem.FilesystemPermission; +import it.grid.storm.filesystem.LocalFile; +import it.grid.storm.griduser.LocalUser; + +public class NoAclManager implements AclManager { + + @Override + public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant group permission on null local file"); + checkNotNull(localUser, "Unable to grant group permission on null local user"); + checkNotNull(permission, "Unable to grant group permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant group permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveGroupPermission(localUser); + } + + @Override + public FilesystemPermission 
grantUserPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant user permission on null local file"); + checkNotNull(localUser, "Unable to grant user permission on null local user"); + checkNotNull(permission, "Unable to grant user permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant user permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveUserPermission(localUser); + } + + @Override + public FilesystemPermission removeGroupPermission(LocalFile localFile, LocalUser localUser) { + + checkNotNull(localFile, "Unable to remove group permission on null local file"); + checkNotNull(localUser, "Unable to remove group permission on null local user"); + checkArgument(localFile.exists(), + "Unable to remove group permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveGroupPermission(localUser); + } + + @Override + public FilesystemPermission removeUserPermission(LocalFile localFile, LocalUser localUser) { + + checkNotNull(localFile, "Unable to remove user permission on null local file"); + checkNotNull(localUser, "Unable to remove user permission on null local user"); + checkArgument(localFile.exists(), + "Unable to remove user permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveUserPermission(localUser); + } + + @Override + public FilesystemPermission revokeGroupPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to revoke group permission on null local file"); + checkNotNull(localUser, "Unable to revoke group permission on null local user"); + checkNotNull(permission, "Unable to revoke group permission with null permission"); + checkArgument(localFile.exists(), + "Unable to revoke group permission on a non existent 
local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveGroupPermission(localUser); + } + + @Override + public FilesystemPermission revokeUserPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to revoke user permission on null local file"); + checkNotNull(localUser, "Unable to revoke user permission on null local user"); + checkNotNull(permission, "Unable to revoke user permission with null permission"); + checkArgument(localFile.exists(), + "Unable to revoke user permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveUserPermission(localUser); + } + + @Override + public FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to set group permission on null local file"); + checkNotNull(localUser, "Unable to set group permission on null local user"); + checkNotNull(permission, "Unable to set group permission with null permission"); + checkArgument(localFile.exists(), + "Unable to set group permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveGroupPermission(localUser); + } + + @Override + public FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to set user permission on null local file"); + checkNotNull(localUser, "Unable to set user permission on null local user"); + checkNotNull(permission, "Unable to set user permission with null permission"); + checkArgument(localFile.exists(), "Unable to set user permission on a non existent local file: " + + localFile.getAbsolutePath()); + + return localFile.getEffectiveUserPermission(localUser); + } + + @Override + public void removeHttpsPermissions(LocalFile localFile) { + + checkNotNull(localFile, "Unable to remove 
https permission on null local file"); + checkArgument(localFile.exists(), + "Unable to remove httès permission on a non existent local file: " + + localFile.getAbsolutePath()); + } + + @Override + public void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant https user permission on null local file"); + checkNotNull(localUser, "Unable to grant https user permission on null local user"); + checkNotNull(permission, "Unable to grant https user permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant https user permission on a non existent local file: " + + localFile.getAbsolutePath()); + } + + @Override + public void grantHttpsServiceGroupPermission(LocalFile localFile, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant https service group permission on null local file"); + checkNotNull(permission, "Unable to grant https service group permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant https service group permission on a non existent local file: " + + localFile.getAbsolutePath()); + } + + @Override + public void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant https group permission on null local file"); + checkNotNull(localUser, "Unable to grant https group permission on null local user"); + checkNotNull(permission, "Unable to grant https group permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant https group permission on a non existent local file: " + + localFile.getAbsolutePath()); + } + + @Override + public void grantHttpsServiceUserPermission(LocalFile localFile, + FilesystemPermission permission) { + + checkNotNull(localFile, "Unable to grant https service user permission on null local file"); + checkNotNull(permission, "Unable 
to grant https service user permission with null permission"); + checkArgument(localFile.exists(), + "Unable to grant https service user permission on a non existent local file: " + + localFile.getAbsolutePath()); + } + + @Override + public void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile) { + + checkNotNull(oldLocalFile, "Unable to move https permission on null local source file"); + checkNotNull(newLocalFile, "Unable to move https permission on null local destination file"); + checkArgument(oldLocalFile.exists(), + "Unable to move https permission on a non existent source local file: " + + oldLocalFile.getAbsolutePath()); + checkArgument(newLocalFile.exists(), + "Unable to move https permission on a non existent destination local file: " + + newLocalFile.getAbsolutePath()); + } + +} diff --git a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java index 121a32d38..896f196a0 100644 --- a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java +++ b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java @@ -5,8 +5,8 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.CrusherScheduler; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.scheduler.SchedulerStatus; @@ -40,10 +40,10 @@ public class AdvancedPicker { private TimerTask retrievingTask = null; /* delay time before starting retriever thread, in mssec */ - private final long delay = Configuration.getInstance().getPickingInitialDelay() * 1000; + private final long delay = StormConfiguration.getInstance().getPickingInitialDelay() * 1000; /* period of execution of retrieving, in mssec */ - private final long period = 
Configuration.getInstance().getPickingTimeInterval() * 1000; + private final long period = StormConfiguration.getInstance().getPickingTimeInterval() * 1000; /* boolean that indicates there is a token to abort! */ private boolean abort = false; diff --git a/src/main/java/it/grid/storm/asynch/BoL.java b/src/main/java/it/grid/storm/asynch/BoL.java index 1bd36fcb6..65827d126 100644 --- a/src/main/java/it/grid/storm/asynch/BoL.java +++ b/src/main/java/it/grid/storm/asynch/BoL.java @@ -4,24 +4,27 @@ */ package it.grid.storm.asynch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzDirector; import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.BoLData; -import it.grid.storm.catalogs.RequestData; +import it.grid.storm.catalogs.TapeRecallCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.common.types.SizeUnit; import it.grid.storm.ea.StormEA; import it.grid.storm.filesystem.FSException; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.BoLData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; @@ -31,17 +34,13 @@ import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import 
it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Class that represents a chunk of an srmBringOnLine request: it handles a single file of a - * multifile/directory-expansion request. StoRM then sends the chunk to a chunk-scheduler. Security + * multi-file/directory expansion request. StoRM then sends the chunk to a chunk-scheduler. Security * checks performed as follows: both in the JiT and AoT approach, policies are checked to see if the - * Griduser has read rights on the requested SURL. If the AuthorisationCollector replies with an + * grid-user has read rights on the requested SURL. If the AuthorisationCollector replies with an * isDeny, then the request fails with SRM_AUTHORIZATION_FAILURE status. If the * AuthorisationCollector replies with isIndeterminate, then the request fails with SRM_FAILURE and * explanation string "Failure in PolicySource prevented PolicyCollector from establishing access @@ -56,21 +55,21 @@ * the grid credentials get mapped; the TURL finally gets constructed. If the local file does not * exist the request fails with SRM_INVALID_PATH and corresponding explanation string; if the user * cannot be mapped locally, the request fails with SRM_FAILURE and an explanation String which - * includes the DN used for maping; if there are internal problems constructing the TURL again the + * includes the DN used for mapping; if there are internal problems constructing the TURL again the * request fails with SRM_FAILURE. Appropriate error messages get logged. (2) Traverse permissions * get set on all parent directories to allow access to the file. 
The operation may fail for several * reasons: the file or any of the parent directories may have been removed resulting in - * SRM_INVALID_PATH; StoRM cannot set the requested permissions because a filesystem mask does not - * allow the permissions to be set up; StoRM may be configured for the wrong filesystem; StoRM has - * not got the right permissions to manipulate the ACLs on the filesystem; StoRM may have - * encountered an unexpected error when working with the filesystem. In all these circumstances, the + * SRM_INVALID_PATH; StoRM cannot set the requested permissions because a file-system mask does not + * allow the permissions to be set up; StoRM may be configured for the wrong file-system; StoRM has + * not got the right permissions to manipulate the ACLs on the file-system; StoRM may have + * encountered an unexpected error when working with the file-system. In all these circumstances, the * status changes to SRM_FAILURE, together with an appropriate explanation String, and a respective * log message. (3) The file size is determined. The operation may fail and hence the request too * gets failed, in the following circumstances: the file somehow does not exist, the path to the - * file is not found, an error while communicating with the underlaying FileSystem, or a JVM + * file is not found, an error while communicating with the underlying FileSystem, or a JVM * SecurityManager forbids such operation. In the first two cases the state changes to * SRM_INVALID_PATH, while in the other ones it changes to SRM_FAILURE; proper error strings explain - * the situation further. Error messages get logged. (3) If AoT acls are in place, then the + * the situation further. Error messages get logged. 
(3) If AoT ACLs are in place, then the * PinnedFilesCatalog is asked to pinExistingVolatileEntry, that is, it is asked to pin the entry if * it is already present thereby extending its lifetime (if it is not present, it just means that * the requested file is PERMANENT and there is no need to pin it); status changes to @@ -189,7 +188,7 @@ public void doIt() { StoRI fileStoRI = null; try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl, gu); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl '{}' and user '{}'. " + "IllegalArgumentException: {}", @@ -306,7 +305,7 @@ private void manageIsPermit(StoRI fileStoRI) { StormEA.setPinned(localFile.getAbsolutePath(), expDate); - requestData.setFileSize(TSizeInBytes.make(localFile.length(), SizeUnit.BYTES)); + requestData.setFileSize(TSizeInBytes.make(localFile.length())); if (isStoriOndisk(fileStoRI)) { @@ -319,7 +318,7 @@ private void manageIsPermit(StoRI fileStoRI) { if (gu instanceof AbstractGridUser) { voName = ((AbstractGridUser) gu).getVO().getValue(); } - new TapeRecallCatalog().insertTask(this, voName, localFile.getAbsolutePath()); + TapeRecallCatalog.getInstance().insertTask(this, voName, localFile.getAbsolutePath()); backupData(localFile); } diff --git a/src/main/java/it/grid/storm/asynch/BoLFeeder.java b/src/main/java/it/grid/storm/asynch/BoLFeeder.java index a947e7ac8..15ec6ee0c 100644 --- a/src/main/java/it/grid/storm/asynch/BoLFeeder.java +++ b/src/main/java/it/grid/storm/asynch/BoLFeeder.java @@ -5,19 +5,19 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; import 
it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; @@ -234,7 +234,7 @@ private void manageIsDirectory(BoLPersistentChunkData chunkData) { StoRI stori = null; try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, gu); + stori = Namespace.getInstance().resolveStoRIbySURL(surl, gu); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl {} for user {}. 
" + "IllegalArgumentException: {}", diff --git a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java index 67a74cb7e..23696a88f 100644 --- a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java @@ -5,9 +5,9 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; diff --git a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java index bd41713b3..ae5f16bd4 100644 --- a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java +++ b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.ChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; +import it.grid.storm.persistence.model.ChunkData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TStatusCode; diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java index 2ffe5b880..90128667d 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java @@ -4,9 +4,9 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import 
it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; /** * This class represents an Exception thrown when a BoLChunk is created with any null attribute: diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java index 150c1274c..e25e253dc 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a BoLFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java index c8a8241dc..094923526 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java @@ -4,9 +4,9 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; /** * This class represents an Exceptin thrown when a PtPChunk is created with any null attribute: diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java index 4f2c87c66..eea11cd30 100644 --- 
a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PtGData; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java index 97f805f00..f9fe22944 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java @@ -4,9 +4,9 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.RequestSummaryData; /** * This class represents an Exceptin thrown when a PtGChunk is created with any null attribute: diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java index c9e1bb8eb..01450cf52 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtGFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java index cc565bc26..f61f824bd 100644 
--- a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtPFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java index 0616e6376..e7a389e86 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java @@ -4,8 +4,8 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestData; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/PtG.java b/src/main/java/it/grid/storm/asynch/PtG.java index 8396823c1..96d0dcf8a 100644 --- a/src/main/java/it/grid/storm/asynch/PtG.java +++ b/src/main/java/it/grid/storm/asynch/PtG.java @@ -17,12 +17,11 @@ import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtGData; +import it.grid.storm.catalogs.TapeRecallCatalog; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.ea.StormEA; import it.grid.storm.filesystem.FSException; import it.grid.storm.filesystem.FilesystemPermission; @@ 
-32,7 +31,7 @@ import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidGetTURLProtocolException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.TURLBuildingException; @@ -42,6 +41,7 @@ import it.grid.storm.namespace.model.Protocol; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; @@ -55,7 +55,6 @@ import it.grid.storm.synchcall.command.CommandHelper; import it.grid.storm.synchcall.data.DataHelper; import it.grid.storm.synchcall.data.IdentityInputData; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; public class PtG implements Delegable, Chooser, Request, Suspendedable { @@ -70,12 +69,12 @@ public class PtG implements Delegable, Chooser, Request, Suspendedable { protected PtGData requestData; /** - * Time that wil be used in all jit and volatile tracking. + * Time that will be used in all JiT and volatile tracking. 
*/ protected final Calendar start; /** - * boolean that indicates the state of the shunk is failure + * boolean that indicates the state of the chunk is failure */ protected boolean failure = false; @@ -105,7 +104,7 @@ public PtG(PtGData reqData) throws IllegalArgumentException { requestData = reqData; start = Calendar.getInstance(); - if (Configuration.getInstance().getPTGSkipACLSetup()) { + if (StormConfiguration.getInstance().getPTGSkipACLSetup()) { setupACLs = false; log.debug("Skipping ACL setup on PTG as requested by configuration."); } @@ -139,7 +138,7 @@ public void doIt() { try { if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { try { - fileStoRI = NamespaceDirector.getNamespace() + fileStoRI = Namespace.getInstance() .resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser()); } catch (UnapprochableSurlException e) { unapprochableSurl = true; @@ -158,7 +157,7 @@ public void doIt() { } } else { try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL()); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL()); } catch (UnapprochableSurlException e) { failure = true; log.info("Unable to build a stori for surl {}. " + "UnapprochableSurlException: {}", surl, @@ -195,7 +194,7 @@ public void doIt() { } else { if (requestData.getTransferProtocols().allows(Protocol.HTTP)) { try { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(requestData.getSURL()); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(requestData.getSURL()); } catch (UnapprochableSurlException e) { failure = true; log.info("Unable to build a stori for surl {}. 
" + "UnapprochableSurlException: {}", @@ -334,8 +333,7 @@ private void manageIsPermit(StoRI fileStoRI) { try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + TSizeInBytes fileSize = TSizeInBytes.make(fileStoRI.getLocalFile().length()); requestData.setFileSize(fileSize); log.debug("File size: {}", fileSize); @@ -369,8 +367,8 @@ private void manageIsPermit(StoRI fileStoRI) { } } try { - new TapeRecallCatalog().insertTask(this, voName, - fileStoRI.getLocalFile().getAbsolutePath()); + TapeRecallCatalog.getInstance() + .insertTask(this, voName, fileStoRI.getLocalFile().getAbsolutePath()); } catch (DataAccessException e) { requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape"); failure = true; @@ -423,8 +421,7 @@ private void manageIsPermit(StoRI fileStoRI) { if (canRead) { try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + TSizeInBytes fileSize = TSizeInBytes.make(fileStoRI.getLocalFile().length()); requestData.setFileSize(fileSize); log.debug("File size: {}", fileSize); @@ -486,8 +483,9 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - if (!setupACLs) + if (!setupACLs) { return verifyPath(fileStoRI); + } return verifyPath(fileStoRI) && setParentsAcl(fileStoRI, ((IdentityInputData) requestData).getUser().getLocalUser()); diff --git a/src/main/java/it/grid/storm/asynch/PtGBuilder.java b/src/main/java/it/grid/storm/asynch/PtGBuilder.java index bf89cac09..545e08768 100644 --- a/src/main/java/it/grid/storm/asynch/PtGBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtGBuilder.java @@ -4,13 +4,13 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.AnonymousPtGData; -import it.grid.storm.catalogs.IdentityPtGData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import 
it.grid.storm.catalogs.InvalidPtGDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtGData; +import it.grid.storm.persistence.model.IdentityPtGData; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/asynch/PtGFeeder.java b/src/main/java/it/grid/storm/asynch/PtGFeeder.java index 5f39e9641..55398a154 100644 --- a/src/main/java/it/grid/storm/asynch/PtGFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtGFeeder.java @@ -4,19 +4,19 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import 
it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; @@ -250,7 +250,7 @@ private void manageIsDirectory(PtGPersistentChunkData chunkData) { /* Build StoRI for current chunk */ StoRI stori = null; try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, chunkData.getUser()); + stori = Namespace.getInstance().resolveStoRIbySURL(surl, chunkData.getUser()); } catch (IllegalArgumentException e) { log.error( "Unable to build a stori for surl {} for user {}. " + "IllegalArgumentException: {}", diff --git a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java index 8d01b7cbc..f58e3cbfb 100644 --- a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java @@ -6,9 +6,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.PtGPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/PtP.java b/src/main/java/it/grid/storm/asynch/PtP.java index 111505c63..a154dc279 100644 --- a/src/main/java/it/grid/storm/asynch/PtP.java +++ b/src/main/java/it/grid/storm/asynch/PtP.java @@ -4,6 +4,8 @@ */ package it.grid.storm.asynch; +import static it.grid.storm.srm.types.TFileStorageType.VOLATILE; + import java.io.IOException; import java.util.Arrays; 
import java.util.Calendar; @@ -12,18 +14,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Preconditions; + import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtPData; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.ea.StormEA; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; @@ -33,7 +36,7 @@ import it.grid.storm.namespace.ExpiredSpaceTokenException; import it.grid.storm.namespace.InvalidGetTURLProtocolException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.TURLBuildingException; @@ -42,13 +45,13 @@ import it.grid.storm.namespace.model.DefaultACL; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; import it.grid.storm.space.SpaceHelper; import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TOverwriteMode; import it.grid.storm.srm.types.TRequestToken; 
import it.grid.storm.srm.types.TSURL; @@ -65,13 +68,13 @@ * multifile request. StoRM then sends the chunk to a chunk-scheduler. For an existing file: if * TOverwriteMode is set to Never, then the chunk fails with SRM_DUPLICATION_ERROR; if * TOverwriteMode is Always or WhenFilesAreDifferent, the file gets treated in the same fashion: - * moreover the behaviour is the same as for the case of a non existing file described later on, + * moreover the behavior is the same as for the case of a non existing file described later on, * except that the only policy check made is about the presence of write rights, instead of create - * rights, as well as erasing the file before going on with the processing - all previous data gets - * lost! If the SURL refers to a file that does not exist, the behaviour is identical whatever the + * rights, as well as erasing the file before going on with the processing all previous data gets + * lost! If the SURL refers to a file that does not exist, the behavior is identical whatever the * TOverwriteMode; in particular: AuthorisationCollector is queried for File Creation policies: if * it is set to Deny, then the chunk is failed with SRM_AUTHORIZATION_FAILURE. If it is set to - * Permit, the situation is decribed later on. For any other decisions, the chunk is failed with + * Permit, the situation is described later on. For any other decisions, the chunk is failed with * SRM_FAILURE: it is caused when the policy is missing so no decision can be made, or if there is a * problem querying the Policies, or any new state for the AuthorisationDecision is introduced but * the PtP logic is not updated. In case Create rights are granted, the presence of a space token @@ -83,12 +86,12 @@ * supplied, the space is allocated as requested and again a special mock reserve file gets created. 
* A Write ACL is setup on the file regardless of the Security Model (AoT or JiT); if the file is * specified as VOLATILE, it gets pinned in the PinnedFilesCatalog; if JiT is active, the ACL will - * live only for the given time interval. A TURL gets filled in, the status transits to + * live only for the given time interval. A TURL gets filled in, the status moves to * SRM_SPACE_AVAILABLE, and the PtPCatalog is updated. There are error situations which get handled * as follows: If the placeHolder file cannot be created, or the implicit reservation fails, or the - * supplied space token does not exist, the request fails and chenages state to SRM_FAILURE. If the + * supplied space token does not exist, the request fails and changes state to SRM_FAILURE. If the * setting up of the ACL fails, the request fails too and the state changes to SRM_FAILURE. - * Appropriate messagges get logged. + * Appropriate messages get logged. * * @author EGRID - ICTP Trieste * @date June, 2005 @@ -106,12 +109,12 @@ public class PtP implements Delegable, Chooser, Request { protected final PtPData requestData; /** - * Time that wil be used in all jit and volatile tracking. + * Time that will be used in all JiT and volatile tracking. */ protected final Calendar start; /** - * boolean that indicates the state of the shunk is failure + * boolean that indicates the state of the chunk is failure */ protected boolean failure = false; @@ -120,6 +123,11 @@ public class PtP implements Delegable, Chooser, Request { */ protected boolean spacefailure = false; + /** + * boolean that indicates if setting ACL on the 0-size file is necessary or not + */ + protected boolean setupACLs = true; + /** * Constructor requiring the VomsGridUser, the RequestSummaryData, the PtPChunkData about this * chunk, and the GlobalStatusManager. 
If the supplied attributes are null, an @@ -133,6 +141,11 @@ public PtP(PtPData chunkData) throws InvalidRequestAttributesException { } this.requestData = chunkData; start = Calendar.getInstance(); + + if (StormConfiguration.getInstance().getPTPSkipACLSetup()) { + setupACLs = false; + log.debug("Skipping ACL setup on PTP as requested by configuration."); + } } /** @@ -176,10 +189,10 @@ public void doIt() { try { if (requestData instanceof IdentityInputData) { - fileStoRI = NamespaceDirector.getNamespace() + fileStoRI = Namespace.getInstance() .resolveStoRIbySURL(surl, ((IdentityInputData) requestData).getUser()); } else { - fileStoRI = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + fileStoRI = Namespace.getInstance().resolveStoRIbySURL(surl); } } catch (UnapprochableSurlException e) { @@ -358,11 +371,11 @@ private void managePermit(StoRI fileStoRI) { requestData.changeStatusSRM_NOT_SUPPORTED( "Unable to build TURL with " + "specified transfer protocols!"); failure = true; - log.error("ERROR in PtPChunk! No valid transfer protocol found. {}", e.getMessage(), e); + log.error("ERROR in PtPChunk! No valid transfer protocol found. {}", e.getMessage()); return; } catch (TURLBuildingException e) { requestData.changeStatusSRM_FAILURE( - "Unable to build the TURL for the " + "provided transfer protocol"); + "Unable to build the TURL for the provided transfer protocol"); failure = true; log.error("ERROR in PtPChunk! There was a failure building the TURL. " + "TURLBuildingException: {} ", e.getMessage(), e); @@ -377,50 +390,103 @@ private void managePermit(StoRI fileStoRI) { "Unable to find local user for " + DataHelper.getRequestor(requestData)); failure = true; log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); + "ERROR in PtGChunk! Unable to find LocalUser for {}! 
CannotMapUserException: {}", + DataHelper.getRequestor(requestData), e.getMessage()); return; } - if (canTraverse) { - // Use any reserved space which implies the existence of a - // file! - if (managePermitReserveSpaceStep(fileStoRI)) { - boolean canWrite; - try { - canWrite = managePermitSetFileStep(fileStoRI); - } catch (CannotMapUserException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to find local user for " + DataHelper.getRequestor(requestData)); - failure = true; - log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); - return; - } - if (!canWrite) { - // URGENT!!! - // roll back! ok3, ok2 and ok1 - } else { - log.debug( - "PTP CHUNK. Addition of ReadWrite ACL on file successfully " + "completed for {}", - fileStoRI.getAbsolutePath()); - requestData.setTransferURL(auxTURL); - requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut " + "successfully handled!"); - failure = false; - if (requestData.fileStorageType().equals(TFileStorageType.VOLATILE)) { - VolatileAndJiTCatalog.getInstance() - .trackVolatile(fileStoRI.getPFN(), Calendar.getInstance(), - requestData.fileLifetime()); - } - } + if (!canTraverse) { + failure = true; + requestData.changeStatusSRM_FAILURE("Unable to set up parent path"); + log.error("ERROR in PtGChunk! Unable to set up parent path"); + return; + } + if (!hasEnoughSpace(fileStoRI)) { + failure = true; + requestData.changeStatusSRM_FAILURE("Not enough space on storage area"); + log.error("ERROR in PtGChunk! 
Not enough space on storage area"); + return; + } + if (!setupACLs) { + log.debug("ACL setup and file creation skipped by configuration"); + requestData.setTransferURL(auxTURL); + requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut successfully handled!"); + failure = false; + return; + } + if (!managePermitReserveSpaceStep(fileStoRI)) { + failure = true; + requestData.changeStatusSRM_FAILURE("Unable to reserve space on storage area"); + log.error("ERROR in PtGChunk! Unable to reserve space on storage area"); + return; + } + boolean canWrite; + try { + canWrite = managePermitSetFileStep(fileStoRI); + } catch (CannotMapUserException e) { + requestData.changeStatusSRM_FAILURE( + "Unable to find local user for " + DataHelper.getRequestor(requestData)); + failure = true; + log.error("ERROR in PtGChunk! Unable to find LocalUser for {}! CannotMapUserException: {}", + DataHelper.getRequestor(requestData), e.getMessage()); + return; + } + if (canWrite) { + log.debug("PTP CHUNK. Addition of ReadWrite ACL on file successfully completed for {}", + fileStoRI.getAbsolutePath()); + requestData.setTransferURL(auxTURL); + requestData.changeStatusSRM_SPACE_AVAILABLE("srmPrepareToPut successfully handled!"); + failure = false; + if (VOLATILE.equals(requestData.fileStorageType())) { + VolatileAndJiTCatalog.getInstance() + .trackVolatile(fileStoRI.getPFN(), Calendar.getInstance(), requestData.fileLifetime()); + } + return; + } + } + + private boolean hasEnoughSpace(StoRI fileStoRI) { + + Preconditions.checkNotNull(fileStoRI.getVirtualFileSystem()); + VirtualFS fs = fileStoRI.getVirtualFileSystem(); + + if (!fs.getProperties().isOnlineSpaceLimited()) { + log.debug("{} has no online space limited!", fs.getAliasName()); + return true; + } + SpaceHelper sp = new SpaceHelper(); + if (sp.isSAFull(PtP.log, fileStoRI)) { + log.debug("{} is full!", fs.getAliasName()); + return false; + } + boolean isDiskUsageServiceEnabled = 
StormConfiguration.getInstance().getDiskUsageServiceEnabled(); + if (!sp.isSAInitialized(PtP.log, fileStoRI) && isDiskUsageServiceEnabled) { + /* Trust we got space, let the request pass */ + log.debug( + "PtPChunk: ReserveSpaceStep: the storage area space initialization is in progress, optimistic approach, considering we have enough space"); + return true; + } + TSizeInBytes size = requestData.expectedFileSize(); + if (size.isEmpty()) { + log.debug("Expected size is zero or non-available. We trust there's enough space"); + return true; + } + long freeSpace = sp.getSAFreeSpace(PtP.log, fileStoRI); + if (freeSpace != -1 && freeSpace <= size.value()) { + TSpaceToken SASpaceToken = sp.getTokenFromStoRI(PtP.log, fileStoRI); + if (SASpaceToken == null || SASpaceToken.isEmpty()) { + log.error( + "PtPChunk - ReserveSpaceStep: Unable to get a valid TSpaceToken for stori {} . Unable to verify storage area space initialization", + fileStoRI); + requestData.changeStatusSRM_FAILURE("No valid space token for the Storage Area"); + } else { - // URGENT!!! - // roll back! ok2 and ok1 + log.debug("PtPChunk - ReserveSpaceStep: no free space on Storage Area!"); + requestData.changeStatusSRM_FAILURE("No free space on Storage Area"); } - } else { - // URGENT!!! - // roll back ok1! 
+ failure = true; + return false; } + return true; } /** @@ -433,12 +499,14 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx if (!preparePath(fileStoRI)) { return false; } - if (requestData instanceof IdentityInputData) { - LocalUser user = ((IdentityInputData) requestData).getUser().getLocalUser(); - return setParentAcl(fileStoRI, user); - } + if (setupACLs) { + if (requestData instanceof IdentityInputData) { + LocalUser user = ((IdentityInputData) requestData).getUser().getLocalUser(); + return setParentAcl(fileStoRI, user); + } - setHttpsServiceParentAcl(fileStoRI); + setHttpsServiceParentAcl(fileStoRI); + } return true; } @@ -461,7 +529,7 @@ private boolean preparePath(StoRI fileStoRI) { private boolean prepareDirectory(LocalFile dir) { boolean automaticDirectoryCreation = - Configuration.getInstance().getAutomaticDirectoryCreation(); + StormConfiguration.getInstance().getAutomaticDirectoryCreation(); if (dir.exists()) { if (!dir.isDirectory()) { @@ -501,7 +569,7 @@ private void updateUsedSpace(LocalFile dir) { VirtualFS vfs; try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyLocalFile(dir); + vfs = Namespace.getInstance().resolveVFSbyLocalFile(dir); } catch (NamespaceException e) { log.error("srmPtP: Error during used space update - {}", e.getMessage()); return; @@ -634,7 +702,7 @@ private boolean setJiTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermission permission) throws Exception { - log.debug("SrmMkdir: Adding AoT ACL {} to user {} for directory: '{}'", permission, localUser, + log.debug("SrmMkdir: Adding AoT ACL {} to user {} for file: '{}'", permission, localUser, fileStori.getAbsolutePath()); try { @@ -700,47 +768,6 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { TSpaceToken spaceToken = requestData.getSpaceToken(); LocalFile localFile = fileStoRI.getLocalFile(); - // In case of SRM Storage Area 
limitation enabled, - // the Storage Area free size is retrieved from the database - // and the PtP fails if there is not enougth space. - - VirtualFS fs = fileStoRI.getVirtualFileSystem(); - - if (fs != null && fs.getProperties().isOnlineSpaceLimited()) { - SpaceHelper sp = new SpaceHelper(); - long freeSpace = sp.getSAFreeSpace(PtP.log, fileStoRI); - if ((sp.isSAFull(PtP.log, fileStoRI)) - || (!size.isEmpty() && ((freeSpace != -1) && (freeSpace <= size.value())))) { - /* Verify if the storage area space has been initialized */ - /* - * If is not initialized verify if the SpaceInfoManager is currently initializing this - * storage area - */ - TSpaceToken SASpaceToken = sp.getTokenFromStoRI(PtP.log, fileStoRI); - if (SASpaceToken == null || SASpaceToken.isEmpty()) { - log.error("PtPChunk - ReserveSpaceStep: Unable to get a valid " - + "TSpaceToken for stori {} . Unable to verify storage area space " - + "initialization", fileStoRI); - requestData.changeStatusSRM_FAILURE("No valid space token for the Storage Area"); - failure = true; - return false; - } else { - if (!sp.isSAInitialized(PtP.log, fileStoRI) - && Configuration.getInstance().getDiskUsageServiceEnabled()) { - /* Trust we got space, let the request pass */ - log.debug("PtPChunk: ReserveSpaceStep: the storage area space " - + "initialization is in progress, optimistic approach, considering " - + "we got enough space"); - } else { - log.debug("PtPChunk - ReserveSpaceStep: no free space on Storage Area!"); - requestData.changeStatusSRM_FAILURE("No free space on Storage Area"); - failure = true; - return false; - } - } - } - } - try { boolean fileWasCreated = localFile.createNewFile(); @@ -798,12 +825,9 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { return true; } catch (SecurityException e) { - // file.createNewFile could not create file because the Java - // SecurityManager did not grant - // write premission! 
This indicates a possible conflict between a - // local system administrator - // who applied a strict local policy, and policies as specified by - // the PolicyCollector! + // file.createNewFile could not create file because the Java SecurityManager did not grant + // write permission! This indicates a possible conflict between a local system administrator + // who applied a strict local policy, and policies as specified by the PolicyCollector! requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!"); failure = true; log.error("ERROR in PtPChunk! During space reservation step in PtP, " @@ -811,8 +835,7 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { + "writing the file! ", localFile.toString(), e); return false; } catch (IOException e) { - // file.createNewFile could not create file because of a local IO - // Error! + // file.createNewFile could not create file because of a local IO Error! requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!"); failure = true; log.error( @@ -844,9 +867,9 @@ private boolean managePermitReserveSpaceStep(StoRI fileStoRI) { log.info("PtPChunk execution failed. ExpiredSpaceTokenException: {}", e.getMessage()); return false; } catch (Exception e) { - // This could be thrown by Java from Filesystem component given that + // This could be thrown by Java from FileSystem component given that // there is GPFS under the hoods, but I do not know exactly how - // java.io.File behaves with an ACL capable filesystem!! + // java.io.File behaves with an ACL capable FileSystem!! 
requestData.changeStatusSRM_FAILURE("Space Management step in " + "srmPrepareToPut failed!"); failure = true; log.error( @@ -861,7 +884,7 @@ private boolean isExistingSpaceToken(TSpaceToken spaceToken) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(spaceToken); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken); } catch (TransferObjectDecodingException e) { log.error("Unable to build StorageSpaceData from StorageSpaceTO." + " TransferObjectDecodingException: {}", e.getMessage()); diff --git a/src/main/java/it/grid/storm/asynch/PtPBuilder.java b/src/main/java/it/grid/storm/asynch/PtPBuilder.java index f61c57f53..0ab5990b4 100644 --- a/src/main/java/it/grid/storm/asynch/PtPBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtPBuilder.java @@ -6,14 +6,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.AnonymousPtPData; -import it.grid.storm.catalogs.IdentityPtPData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import it.grid.storm.catalogs.InvalidPtPDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtPData; + import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtPData; +import it.grid.storm.persistence.model.IdentityPtPData; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; @@ -42,7 +43,7 @@ 
public static PtP build(PrepareToPutInputData inputData) throws BuilderException TLifeTimeInSeconds pinLifetime = inputData.getDesiredPinLifetime(); TLifeTimeInSeconds fileLifetime = inputData.getDesiredFileLifetime(); TFileStorageType fileStorageType = TFileStorageType - .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType()); + .getTFileStorageType(StormConfiguration.getInstance().getDefaultFileStorageType()); TSpaceToken spaceToken = inputData.getTargetSpaceToken(); TSizeInBytes expectedFileSize = inputData.getFileSize(); TURLPrefix transferProtocols = inputData.getTransferProtocols(); diff --git a/src/main/java/it/grid/storm/asynch/PtPFeeder.java b/src/main/java/it/grid/storm/asynch/PtPFeeder.java index 40f1d0d6c..185943df5 100644 --- a/src/main/java/it/grid/storm/asynch/PtPFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtPFeeder.java @@ -5,10 +5,10 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java index 21cfcf5ee..79063e134 100644 --- a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java @@ -6,9 +6,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPData; -import it.grid.storm.catalogs.PtPPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import 
it.grid.storm.persistence.model.PtPData; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/Suspendedable.java b/src/main/java/it/grid/storm/asynch/Suspendedable.java index 80564d107..5fdd55d31 100644 --- a/src/main/java/it/grid/storm/asynch/Suspendedable.java +++ b/src/main/java/it/grid/storm/asynch/Suspendedable.java @@ -4,7 +4,7 @@ */ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; public interface Suspendedable { diff --git a/src/main/java/it/grid/storm/authz/AuthzDirector.java b/src/main/java/it/grid/storm/authz/AuthzDirector.java index c5340adcf..3c99a9acb 100644 --- a/src/main/java/it/grid/storm/authz/AuthzDirector.java +++ b/src/main/java/it/grid/storm/authz/AuthzDirector.java @@ -5,159 +5,157 @@ package it.grid.storm.authz; import java.io.File; -import java.util.ArrayList; -import java.util.HashMap; +import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + import it.grid.storm.authz.path.PathAuthz; import it.grid.storm.authz.path.conf.PathAuthzDBReader; import it.grid.storm.authz.sa.AuthzDBReaderException; import it.grid.storm.authz.sa.SpaceDBAuthz; import it.grid.storm.authz.sa.test.MockSpaceAuthz; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.model.SAAuthzType; import it.grid.storm.namespace.model.VirtualFS; import 
it.grid.storm.srm.types.TSpaceToken; public class AuthzDirector { - private static final Logger log = LoggerFactory - .getLogger(AuthzDirector.class); - private static String configurationPATH; - - // Map between 'SpaceToken' and the related 'SpaceAuthz' - private static Map spaceAuthzs = null; - - // PathAuthz is only one, shared by all SAs - private static PathAuthzInterface pathAuthz = null; - - /** - * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest - */ - private static Map buildSpaceAuthzsMAP() { - - HashMap spaceAuthzMap = new HashMap(); - - // Retrieve the list of VFS from Namespace - NamespaceInterface ns = NamespaceDirector.getNamespace(); - ArrayList vfss; - try { - vfss = new ArrayList(ns.getAllDefinedVFS()); - for (VirtualFS vfs : vfss) { - String vfsName = vfs.getAliasName(); - SAAuthzType authzTp = vfs.getStorageAreaAuthzType(); - String authzName = ""; - if (authzTp.equals(SAAuthzType.AUTHZDB)) { - // The Space Authz is based on Authz DB - authzName = vfs.getStorageAreaAuthzDB(); - log.debug("Loading AuthzDB '{}'", authzName); - if (existsAuthzDBFile(authzName)) { - // Digest the Space AuthzDB File - TSpaceToken spaceToken = vfs.getSpaceToken(); - SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName); - spaceAuthzMap.put(spaceToken, spaceAuthz); - } else { - log.error("File AuthzDB '{}' related to '{}' does not exists.", - authzName, vfsName); - } - } else { - authzName = vfs.getStorageAreaAuthzFixed(); - } - log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName); - } - } catch (NamespaceException e) { - log.error("Unable to initialize AUTHZ DB! 
Error: {}", e.getMessage(), e); - } - - return spaceAuthzMap; - } - - /** - * Utility method - * - * @param dbFileName - * @return - * @throws AuthzDBReaderException - */ - private static boolean existsAuthzDBFile(String dbFileName) { - - String fileName = configurationPATH + File.separator + dbFileName; - boolean exists = (new File(fileName)).exists(); - if (!exists) { - log.warn("The AuthzDB File '{}' does not exists", dbFileName); - } - return exists; - } - - // **************************************** - // PUBLIC METHODS - // **************************************** - - /****************************** - * SPACE AUTHORIZATION ENGINE - ******************************/ - public static void initializeSpaceAuthz() { - - // Build Space Authzs MAP - spaceAuthzs = buildSpaceAuthzsMAP(); - } - - /** - * Retrieve the Space Authorization module related to the Space Token - * - * @param token - * @return - */ - public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) { - - SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz(); - // Retrieve the SpaceAuthz related to the Space Token - if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) { - spaceAuthz = spaceAuthzs.get(token); - log.debug("Space Authz related to S.Token ='{}' is '{}'", token, - spaceAuthz.getSpaceAuthzID()); - } else { - log.debug("Space Authz related to S.Token ='{}' does not exists. 
" - + "Use the MOCK one.", token); - } - return spaceAuthz; - } - - /****************************** - * PATH AUTHORIZATION ENGINE - ******************************/ - - /** - * Initializating the Path Authorization engine - * - * @param pathAuthz2 - */ - public static void initializePathAuthz(String pathAuthzDBFileName) - throws DirectorException { - - PathAuthzDBReader authzDBReader; - try { - authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); - } catch (Exception e) { - log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); - throw new DirectorException("Unable to build a PathAuthzDBReader"); - } - AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); - } - - /** - * Retrieve the Path Authorization module - * - * @todo: To implement this. - */ - public static PathAuthzInterface getPathAuthz() { - - return AuthzDirector.pathAuthz; - } + private static final Logger log = LoggerFactory.getLogger(AuthzDirector.class); + private static String configurationPATH; + + // Map between 'SpaceToken' and the related 'SpaceAuthz' + private static Map spaceAuthzs = null; + + // PathAuthz is only one, shared by all SAs + private static PathAuthzInterface pathAuthz = null; + + /** + * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest + */ + private static Map buildSpaceAuthzsMAP() { + + Map spaceAuthzMap = Maps.newHashMap(); + + // Retrieve the list of VFS from Namespace + Namespace ns = Namespace.getInstance(); + List vfss; + try { + vfss = Lists.newArrayList(ns.getAllDefinedVFS()); + for (VirtualFS vfs : vfss) { + String vfsName = vfs.getAliasName(); + SAAuthzType authzTp = vfs.getStorageAreaAuthzType(); + String authzName = ""; + if (authzTp.equals(SAAuthzType.AUTHZDB)) { + // The Space Authz is based on Authz DB + authzName = vfs.getStorageAreaAuthzDB(); + log.debug("Loading AuthzDB '{}'", authzName); + if (existsAuthzDBFile(authzName)) { + // Digest the Space AuthzDB File + TSpaceToken spaceToken = 
vfs.getSpaceToken(); + SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName); + spaceAuthzMap.put(spaceToken, spaceAuthz); + } else { + log.error("File AuthzDB '{}' related to '{}' does not exists.", authzName, vfsName); + } + } else { + authzName = vfs.getStorageAreaAuthzFixed(); + } + log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName); + } + } catch (NamespaceException e) { + log.error("Unable to initialize AUTHZ DB! Error: {}", e.getMessage(), e); + } + + return spaceAuthzMap; + } + + /** + * Utility method + * + * @param dbFileName + * @return + * @throws AuthzDBReaderException + */ + private static boolean existsAuthzDBFile(String dbFileName) { + + String fileName = configurationPATH + File.separator + dbFileName; + boolean exists = (new File(fileName)).exists(); + if (!exists) { + log.warn("The AuthzDB File '{}' does not exists", dbFileName); + } + return exists; + } + + // **************************************** + // PUBLIC METHODS + // **************************************** + + /****************************** + * SPACE AUTHORIZATION ENGINE + ******************************/ + public static void initializeSpaceAuthz() { + + // Build Space Authzs MAP + spaceAuthzs = buildSpaceAuthzsMAP(); + } + + /** + * Retrieve the Space Authorization module related to the Space Token + * + * @param token + * @return + */ + public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) { + + SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz(); + // Retrieve the SpaceAuthz related to the Space Token + if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) { + spaceAuthz = spaceAuthzs.get(token); + log.debug("Space Authz related to S.Token ='{}' is '{}'", token, + spaceAuthz.getSpaceAuthzID()); + } else { + log.debug("Space Authz related to S.Token ='{}' does not exists. 
" + "Use the MOCK one.", + token); + } + return spaceAuthz; + } + + /****************************** + * PATH AUTHORIZATION ENGINE + ******************************/ + + /** + * Initializing the Path Authorization engine + * + * @param pathAuthz2 + */ + public static void initializePathAuthz(String pathAuthzDBFileName) throws DirectorException { + + PathAuthzDBReader authzDBReader; + try { + authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); + } catch (Exception e) { + log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); + throw new DirectorException("Unable to build a PathAuthzDBReader"); + } + AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); + } + + /** + * Retrieve the Path Authorization module + * + * @todo: To implement this. + */ + public static PathAuthzInterface getPathAuthz() { + + return AuthzDirector.pathAuthz; + } } diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java index 0acd36171..f1fc6b239 100644 --- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java +++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java @@ -10,7 +10,7 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.authz.path.model.PathACE; import it.grid.storm.authz.path.model.PathAuthzEvaluationAlgorithm; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import java.io.BufferedReader; import java.io.File; @@ -42,7 +42,7 @@ public PathAuthzDBReader(String filename) throws Exception { log.info("Path Authorization : Initializing..."); if (!(existsAuthzDBFile(filename))) { - String configurationPATH = Configuration.getInstance().namespaceConfigPath(); + String configurationPATH = StormConfiguration.getInstance().namespaceConfigPath(); if (configurationPATH.length() == 0) { String userDir = System.getProperty("user.dir"); log.debug("Unable to found the 
configuration path. Assume: '{}'", userDir); diff --git a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java index 99ef444a5..4f2301eee 100644 --- a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java +++ b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java @@ -17,40 +17,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; import it.grid.storm.authz.path.model.PathOperation; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.remote.Constants; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.InvalidStFNAttributeException; import it.grid.storm.common.types.StFN; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.FQAN; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.Protocol; import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TOverwriteMode; class PermissionEvaluator { @@ -59,8 +42,7 @@ class PermissionEvaluator { public static Boolean isOverwriteAllowed() { - return OverwriteModeConverter.getInstance() - .toSTORM(Configuration.getInstance().getDefaultOverwriteMode()) + return OverwriteModeConverter.toSTORM(StormConfiguration.getInstance().getDefaultOverwriteMode()) .equals(TOverwriteMode.ALWAYS); } @@ -72,7 +54,7 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. 
NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -114,7 +96,7 @@ static Boolean evaluateVomsGridUserPermission(String DNDecoded, String FQANSDeco VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -138,7 +120,7 @@ static Boolean evaluateAnonymousPermission(String filePathDecoded, PathOperation VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. NamespaceException: {}", filePathDecoded, e.getMessage()); @@ -160,7 +142,7 @@ static Boolean evaluateAnonymousPermission(String filePathDecoded, SRMFileReques VirtualFS fileVFS; try { - fileVFS = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath(filePathDecoded); + fileVFS = Namespace.getInstance().resolveVFSbyAbsolutePath(filePathDecoded); } catch (NamespaceException e) { log.error("Unable to determine a VFS that maps the requested file " + "path '{}'. 
NamespaceException: {}", filePathDecoded, e.getMessage()); diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java index 4f605e59e..a5d2414ac 100644 --- a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java +++ b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java @@ -13,7 +13,7 @@ import org.slf4j.LoggerFactory; import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.GridUserInterface; /** @@ -45,7 +45,7 @@ public static SpaceDBAuthz makeEmpty() { public SpaceDBAuthz(String dbFileName) { - Configuration config = Configuration.getInstance(); + StormConfiguration config = StormConfiguration.getInstance(); configurationPATH = config.namespaceConfigPath(); if (existsAuthzDBFile(dbFileName)) { this.dbFileName = dbFileName; diff --git a/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java b/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java index efce0fc0a..1b64b7071 100644 --- a/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java +++ b/src/main/java/it/grid/storm/balancer/cache/ResponsivenessCache.java @@ -13,11 +13,11 @@ import com.google.common.collect.Maps; import it.grid.storm.balancer.Node; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; public enum ResponsivenessCache { - INSTANCE(Configuration.getInstance().getServerPoolStatusCheckTimeout()); + INSTANCE(StormConfiguration.getInstance().getServerPoolStatusCheckTimeout()); private static final Logger log = LoggerFactory.getLogger(ResponsivenessCache.class); diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java index ea71f6f27..8dd66e29c 100644 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java +++ 
b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java @@ -4,14 +4,30 @@ */ package it.grid.storm.catalogs; -import it.grid.storm.common.types.SizeUnit; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TDirOption; @@ -23,783 +39,306 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and - * provides methods for looking up a BoLChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ public class BoLChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(BoLChunkCatalog.class); - - /* only instance of BoLChunkCatalog present in StoRM! */ - private static final BoLChunkCatalog cat = new BoLChunkCatalog(); - private final BoLChunkDAO dao = BoLChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! */ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! */ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private BoLChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_SUCCESS(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of BoLChunkCatalog available. - */ - public static BoLChunkCatalog getInstance() { - - return cat; - } - - /** - * Method that returns a Collection of BoLChunkData Objects matching the - * supplied TRequestToken. 
- * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a BoLChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a message gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkCollection = dao.find(rt); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); - List list = new ArrayList(); - - if (chunkCollection.isEmpty()) { - log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified " - + "request: {}", rt); - return list; - } - - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkCollection) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a BoLChunkData from the received BoLChunkDataTO - * - * @param auxTO - * @param rt - * @return - */ - private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.sulrUniqueID() != null) { - fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.getLifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. " - + "Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(auxTO.getDirOption(), - auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .getProtocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or" - + " could not translate TransferProtocols!"); - /* fail construction of BoLChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - auxTO.getStatus()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.getStatus()); - } else { - status = new TReturnStatus(code, auxTO.getErrString()); - } - // transferURL - /* - * whatever is read is just meaningless because BoL will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make BoLChunkData - BoLPersistentChunkData aux = null; - try { - aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, - transferProtocols, fileSize, status, transferURL, - auxTO.getDeferredStartTime()); - aux.setPrimaryKey(auxTO.getPrimaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedBoLChunk(auxTO); - log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " - + "chunk data from persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique - * ID taken from the BoLChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedBoLChunkDataTO chunkTO, - final ReducedBoLChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, - final BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedBoLChunkData from the data contained in the received - * BoLChunkData - * - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedBoLChunkDataTO from the data contained in the received - * BoLChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { - - ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); - reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); - reducedChunkTO.setStatus(chunkTO.getStatus()); - reducedChunkTO.setErrString(chunkTO.getErrString()); - return reducedChunkTO; - } - - /** - * Checks if the received BoLChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(BoLChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.sulrUniqueID() != null); - } - - /** - * Checks if the received ReducedBoLChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - // TODO MICHELE USER_SURL new method - private boolean isComplete(ReducedBoLChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method used to update into Persistence a retrieved BoLChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. - */ - synchronized public void update(BoLPersistentChunkData cd) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setFileSize(cd.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - cd.getLifeTime().value())); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(cd.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - - dao.update(to); - // TODO MICHELE SURL STORE - // SurlStatusStore.getInstance().storeSurlStatus(cd.getSURL(), - // cd.getStatus().getStatusCode()); - } - - /** - * Refresh method. 
TODO THIS IS A WORK IN PROGRESS!!!! This method have to - * synch the ChunkData information with the database status. - * - * @param auxTO - * @param BoLPersistentChunkData - * inputChunk - * @return BoLChunkData outputChunk - */ - synchronized public BoLPersistentChunkData refreshStatus( - BoLPersistentChunkData inputChunk) { - - /* Currently not used */ - // Call the dao refresh method to synch with the db status - BoLChunkDataTO auxTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("BoL CHUNK CATALOG: retrieved data {}", auxTO); - if (auxTO == null) { - log.warn("BoL CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, auxTO.getErrString()); - } - inputChunk.setStatus(status); - return inputChunk; - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a messagge gets logged. 
- */ - synchronized public Collection lookupReducedBoLChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("BoL CHUNK CATALOG! No chunks found in persistence for {}", rt); - } else { - ReducedBoLChunkData reducedChunkData = null; - for (ReducedBoLChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedBoLChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(TSURL surl, - GridUserInterface user) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupBoLChunkData(TSURL surl) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupBoLChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - 
Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - List list = new ArrayList(); - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! unable to add missing informations " - + "on DB to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - private BoLPersistentChunkData makeOne(BoLChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, new TRequestToken(chunkTO.getRequestToken(), - chunkTO.getTimeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. 
- * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given GridUser and Collection of - * TSURLs, then an empty Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedBoLChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedBoLChunkData reducedChunkData; - for (ReducedBoLChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - /** - * @param auxTO - * @return - */ - private ReducedBoLChunkData makeOneReduced( - ReducedBoLChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch 
(InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedBoLChunkData - ReducedBoLChunkData aux = null; - try { - aux = new ReducedBoLChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! Retrieved malformed " - + "Reduced BoL chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive BoL request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. 
- */ - synchronized public void addChild(BoLPersistentChunkData chunkData) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - // needed for now to find ID of request! Must be changed soon! - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field */ - dao.addChild(to); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmBoL. The only fields from BoLChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messages. 
- */ - synchronized public void add(BoLPersistentChunkData chunkData, - GridUserInterface gu) { - - /* Currently NOT used */ - BoLChunkDataTO to = new BoLChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field! */ - dao.addNew(to, gu.getDn()); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to establish if in Persistence there is a BoLChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_SUCCESS(surl.uniqueId()) > 0); - } - - /** - * Method used to transit the specified Collection of ReducedBoLChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. 
- */ - synchronized public void transitSRM_SUCCESStoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedBoLChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - dao.transitSRM_SUCCESStoSRM_RELEASED(primaryKeys, token); - } - - /** - * This method is intended to be used by srmRm to transit all BoL chunks on - * the given SURL which are in the SRM_FILE_PINNED state, to SRM_ABORTED. The - * supplied String will be used as explanation in those chunks return status. - * The global status of the request is _not_ changed. - * - * The TURL of those requests will automatically be set to empty. Notice that - * both removeAllJit(SURL) and removeVolatile(SURL) are automatically invoked - * on PinnedFilesCatalog, to remove any entry and corresponding physical ACLs. - * - * Beware, that the chunks may be part of requests that have finished, or that - * still have not finished because other chunks are being processed. - */ - synchronized public void transitSRM_SUCCESStoSRM_ABORTED(TSURL surl, - String explanation) { - - /* Currently NOT used */ - if (explanation == null) { - explanation = ""; - } - dao.transitSRM_SUCCESStoSRM_ABORTED(surl.uniqueId(), surl.toString(), - explanation); - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all BoL Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! 
- */ - synchronized public void transitExpiredSRM_SUCCESS() { - - dao.transitExpiredSRM_SUCCESS(); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(BoLChunkCatalog.class); + + private final BoLChunkDAO dao; + + private static BoLChunkCatalog instance; + + public static synchronized BoLChunkCatalog getInstance() { + if (instance == null) { + instance = new BoLChunkCatalog(); + } + return instance; + } + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private BoLChunkCatalog() { + + dao = BoLChunkDAOMySql.getInstance(); + } + + /** + * Method that returns a Collection of BoLChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * BoLChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a message gets + * logged. 
+ */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkCollection = dao.find(rt); + log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); + List list = new ArrayList(); + + if (chunkCollection.isEmpty()) { + log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified request: {}", rt); + return list; + } + + BoLPersistentChunkData chunk; + for (BoLChunkDataTO chunkTO : chunkCollection) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, chunk)); + } catch (InvalidReducedBoLChunkDataAttributesException e) { + log.warn( + "BoL CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("BoL CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a BoLChunkData from the received BoLChunkDataTO + * + * @param auxTO + * @param rt + * @return + */ + private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.sulrUniqueID() != null) { + fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.getLifeTime()); + // Check for max value allowed + long max = StormConfiguration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
" + + "Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = + new TDirOption(auxTO.getDirOption(), auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.getProtocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or" + " could not translate TransferProtocols!"); + /* fail construction of BoLChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(auxTO.getFileSize()); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.getStatus()); + } else { + status = new TReturnStatus(code, auxTO.getErrString()); + } + // transferURL + /* + * whatever is read is just meaningless because BoL will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! 
+ */ + TTURL transferURL = TTURL.makeEmpty(); + // make BoLChunkData + BoLPersistentChunkData aux = null; + try { + aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, transferProtocols, + fileSize, status, transferURL, auxTO.getDeferredStartTime()); + aux.setPrimaryKey(auxTO.getPrimaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.updateStatus(auxTO, SRM_FAILURE, "Request is malformed!"); + log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " + + "chunk data from persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique ID taken from the + * BoLChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedBoLChunkDataTO chunkTO, final ReducedBoLChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, + final BoLPersistentChunkData chunk) throws InvalidReducedBoLChunkDataAttributesException { + + ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedBoLChunkData from the data contained in the received BoLChunkData + * + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) + throws InvalidReducedBoLChunkDataAttributesException { + + 
ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedBoLChunkDataTO from the data contained in the received BoLChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { + + ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); + reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); + reducedChunkTO.setStatus(chunkTO.getStatus()); + reducedChunkTO.setErrString(chunkTO.getErrString()); + return reducedChunkTO; + } + + /** + * Checks if the received BoLChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(BoLChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.sulrUniqueID() != null); + } + + /** + * Method used to update into Persistence a retrieved BoLChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(BoLPersistentChunkData cd) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(cd.getPrimaryKey()); + to.setFileSize(cd.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(cd.getStatus().getStatusCode())); + to.setErrString(cd.getStatus().getExplanation()); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(cd.getLifeTime().value())); + to.setNormalizedStFN(cd.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(cd.getSURL().uniqueId())); + dao.update(to); + } + + /** + * Method used to add into Persistence a new entry. The supplied BoLChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive BoL request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlying DAO. + */ + synchronized public void addChild(BoLPersistentChunkData chunkData) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + // needed for now to find ID of request! Must be changed soon! 
+ to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setDeferredStartTime(chunkData.getDeferredStartTime()); + + /* add the entry and update the Primary Key field */ + dao.addChild(to); + chunkData.setPrimaryKey(to.getPrimaryKey()); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java deleted file mode 100644 index 2c4e45f48..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java +++ /dev/null @@ -1,1688 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(BoLChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - private final static BoLChunkDAO dao = new BoLChunkDAO(); - - /** - * timer thread that will run a taask to alert when reconnecting is necessary! - */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - private BoLChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the BoLChunkDAO. 
- */ - public static BoLChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The supplied - * BoLChunkData is used to fill in only the DB table where file specific info - * gets recorded: it does _not_ add a new request! So if spurious data is - * supplied, it will just stay there because of a lack of a parent request! - */ - public synchronized void addChild(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - // insertion - try { - - /* WARNING!!!! We are forced to run a query to get the ID of the request, - * which should NOT be so because the corresponding request object should - * have been changed with the extra field! However, it is not possible - * at the moment to perform such change because of strict deadline and - * the change could wreak havoc the code. So we are forced to make this - * query!!! - */ - - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - id.setString(1, to.getRequestToken()); - logWarnings(id.getWarnings()); - - log.debug("BoL CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - logWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillBoLTables(to, request_id); - - // end transaction! 
- con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. The supplied BoLChunkData is used to fill in - * all the DB tables where file specific info gets recorded: it _adds_ a new - * request! - */ - public synchronized void addNew(BoLChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. */ - PreparedStatement addProtocols = null; // insert protocols for request. - try { - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // add to request_queue... 
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) VALUES (?,?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - /* request type set to bring online */ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.BRING_ON_LINE)); - logWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - logWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.getLifeTime()); - logWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(addNew.getWarnings()); - - addNew.setString(5, "New BoL Request resulting from srmCopy invocation."); - logWarnings(addNew.getWarnings()); - - addNew.setString(6, to.getRequestToken()); - logWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - logWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - logWarnings(addNew.getWarnings()); - - addNew.setInt(9, to.getDeferredStartTime()); - logWarnings(addNew.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - logWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... 
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - logWarnings(con.getWarnings()); - for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - logWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - logWarnings(addProtocols.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - logWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillBoLTables(to, id_new); - - // end transaction! - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Rolling back! Unable to complete addNew! " - + "BoLChunkDataTO: {}; exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillBoLTables(BoLChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_b = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Bol for request */ - PreparedStatement addBoL = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.getDirOption()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.getAllLevelRecursive()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.getNumLevel()); - logWarnings(addDirOption.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - logWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_BoL... sourceSURL and TDirOption! 
- str = "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addBoL = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addBoL.setInt(1, id_do); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(2, requestQueueID); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(3, to.getFromSURL()); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(4, to.normalizedStFN()); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(5, to.sulrUniqueID()); - logWarnings(addBoL.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addBoL.toString()); - addBoL.execute(); - logWarnings(addBoL.getWarnings()); - - rs_b = addBoL.getGeneratedKeys(); - int id_g = extractID(rs_b); - - // third fill in status_BoL... - str = "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - logWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.getStatus()); - logWarnings(addChild.getWarnings()); - - addChild.setString(3, to.getErrString()); - logWarnings(addChild.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; " + addChild.toString()); - addChild.execute(); - logWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_b); - close(rs_s); - close(addDirOption); - close(addBoL); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved BoLChunkDataTO, back - * into the MySQL DB. Only the fileSize, statusCode and explanation, of - * status_BoL table are written to the DB. Likewise for the request - * pinLifetime. In case of any error, an error message gets logged but no - * exception is thrown. 
- */ - public synchronized void update(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID)" - + " SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=?" - + " WHERE rb.ID=?"); - logWarnings(con.getWarnings()); - updateFileReq.setLong(1, to.getFileSize()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(2, to.getStatus()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(3, to.getErrString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(4, to.getLifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.normalizedStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(6, to.sulrUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(7, to.getPrimaryKey()); - logWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("BoL CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Bol represented by the received ReducedBoLChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, " - + "sourceSURL_uniqueID=? WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! Method used to refresh the - * BoLChunkDataTO information from the MySQL DB. In this first version, only - * the statusCode is reloaded from the DB. TODO The next version must contains - * all the information related to the Chunk! In case of any error, an error - * message gets logged but no exception is thrown. 
- */ - public synchronized BoLChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - str = "SELECT statusCode " + "FROM status_BoL " - + "WHERE request_BoLID=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - find.setLong(1, primary_key); - - logWarnings(find.getWarnings()); - log.trace("BoL CHUNK DAO: refresh status method; " + find.toString()); - - rs = find.executeQuery(); - - logWarnings(find.getWarnings()); - BoLChunkDataTO aux = null; - while (rs.next()) { - aux = new BoLChunkDataTO(); - aux.setStatus(rs.getInt("statusCode")); - } - return aux; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding BoLChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, - * request_BoL, status_BoL and request_DirOption. The considered fields are: - * (1) From status_BoL: the ID field which becomes the TOs primary key, and - * statusCode. (2) From request_BoL: sourceSURL (3) From request_queue: - * pinLifetime (4) From request_DirOption: isSourceADirectory, - * alLevelRecursive, numOfLevels In case of any error, a log gets written and - * an empty collection is returned. No exception is thrown. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList protocols = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sb.statusCode<>?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. 
- */ - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.client_dn=? 
AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns the number of BoL requests on the given SURL, that are - * in SRM_SUCCESS state. This method is intended to be used by BoLChunkCatalog - * in the isSRM_SUCCESS method invocation. In case of any error, 0 is - * returned. - */ - public synchronized int numberInSRM_SUCCESS(int surlUniqueID) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: numberInSRM_SUCCESS - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rb.ID) " - + "FROM status_BoL sb JOIN request_BoL rb " - + "ON (sb.request_BoLID=rb.ID) " - + "WHERE rb.sourceSURL_uniqueID=? AND sb.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO - numberInSRM_SUCCESS method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - int numberFileSuccessful = 0; - if (rs.next()) { - numberFileSuccessful = rs.getInt(1); - } - return numberFileSuccessful; - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to determine numberInSRM_SUCCESS! " - + "Returning 0! ", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the request to - * SRM_FAILURE and record it in the DB. This operation could potentially fail - * because the source of the malformed problems could be a problematic DB; - * indeed, initially only log messages where recorded. Yet it soon became - * clear that the source of malformed data were the clients and/or FE - * recording info in the DB. In these circumstances the client would see its - * request as being in the SRM_IN_PROGRESS state for ever. Hence the pressing - * need to inform it of the encountered problems. - */ - public synchronized void signalMalformedBoLChunk(BoLChunkDataTO auxTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: signalMalformedBoLChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_BoL SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_BoLID=" + auxTO.getPrimaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - log.trace("BoL CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), - e.toString(), e); - } finally { - close(signal); - } - } - - /** - * Method that updates all expired requests in SRM_SUCCESS state, into - * SRM_RELEASED. This is needed when the client forgets to invoke - * srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_SUCCESS() { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitExpiredSRM_SUCCESS - unable to get a valid connection!"); - return new ArrayList(); - } - - HashMap expiredSurlMap = new HashMap(); - String str = null; - PreparedStatement prepStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - ResultSet res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! 
unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log.trace("BoLChunkDAO! No chunk of BoL request was transited from " - + "SRM_SUCCESS to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException.", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* Update status of all successful surls to SRM_RELEASED */ - - prepStatement = null; - try { - - str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " - + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(prepStatement.getWarnings()); - - prepStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(prepStatement.getWarnings()); - - log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", - prepStatement.toString()); - - int count = prepStatement.executeUpdate(); - logWarnings(prepStatement.getWarnings()); - - if (count == 0) { - log.trace("BoLChunkDAO! No chunk of BoL request was" - + " transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoLChunkDAO! {} chunks of BoL requests were transited from " - + "SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to transit expired SRM_SUCCESS chunks of " - + "BoL requests, to SRM_RELEASED! 
", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG */ - - HashSet pinnedSurlSet = new HashSet(); - try { - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS) - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - ResultSet res = null; - - prepStatement = con.prepareStatement(str); - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(prepStatement); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - commit(con); - - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(prepStatement); - } - - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - ArrayList expiredSurlList = new ArrayList(); - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. 
{}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage()); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that transits chunks in SRM_SUCCESS to SRM_ABORTED, for the given - * SURL: the overall request status of the requests containing that chunk, is - * not changed! The TURL is set to null. Beware, that the chunks may be part - * of requests that have finished, or that still have not finished because - * other chunks are still being processed. - */ - public synchronized void transitSRM_SUCCESStoSRM_ABORTED(int surlUniqueID, - String surl, String explanation) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN request_BoL rb ON sb.request_BoLID=rb.ID " - + "SET sb.statusCode=?, sb.explanation=?, sb.transferURL=NULL " - + "WHERE sb.statusCode=? AND (rb.sourceSURL_uniqueID=? OR rb.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - logWarnings(stmt.getWarnings()); - - stmt.setInt(3, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - logWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_ABORTED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count > 0) { - log.info("BoL CHUNK DAO! 
{} chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED.", count); - } else { - log.trace("BoL CHUNK DAO! No chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transitSRM_SUCCESStoSRM_ABORTED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates all chunks in SRM_SUCCESS state, into SRM_RELEASED. An - * array of long representing the primary key of each chunk is required: only - * they get the status changed provided their current status is SRM_SUCCESS. - * This method is used during srmReleaseFiles In case of any error nothing - * happens and no exception is thrown, but proper messages get logged. - */ - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL SET statusCode=? " - + "WHERE statusCode=? AND request_BoLID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request " - + "was transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks from SRM_SUCCESS " - + "to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_SUCCESStoSRM_RELEASED(ids); - } else { - /* - * If a request token has been specified, only the related BoL requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " + "WHERE sb.statusCode=? AND rq.r_token='" - + token.toString() + "' AND rb.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request was " - + "transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks " - + "from SRM_SUCCESS to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close ResultSet! Exception: " + e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close Statement {} - Exception: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("BoL, SQL EXception {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("BoL CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("BoL CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("BoL CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } - log.error("BoL CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception( - "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"); - } - - /** - * Auxiliary private method that logs all SQL warnings. 
- */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("BoL CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("BoL CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("BoL CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("BOL CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " - + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sb.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BOL CHUNK DAO! No chunk of BOL request was updated from {} " - + "to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("BOL CHUNK DAO! {} chunks of BOL requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("BOL CHUNK DAO! 
Unable to updated from {} to {}!", - expectedStatusCode, newStatusCode, e); - } finally { - close(stmt); - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " - + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " - + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, 
d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("BOL CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sb.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java deleted file mode 100644 index 4600758d5..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the BoLChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDataTO { - - /* Database table request_Bol fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private boolean dirOption; // initialised in constructor - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private boolean allLevelRecursive; // initialised in constructor - private int numLevel; // initialised in constructor - private List protocolList = null; // initialised in constructor - private long filesize = 0; - private int status; // initialised in constructor - private String errString = " "; - private int deferredStartTime = -1; - private Timestamp timeStamp = null; - - public BoLChunkDataTO() { - - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - this.dirOption = false; - this.allLevelRecursive = false; - this.numLevel = 0; - } - - public boolean getAllLevelRecursive() { - - return allLevelRecursive; - } - - public int getDeferredStartTime() { - - return deferredStartTime; - } - - public boolean getDirOption() { - - return dirOption; - } - - public String getErrString() { - - return errString; - } - - public long getFileSize() { - - return filesize; - } - - public String getFromSURL() { - - return fromSURL; - } - - public int getLifeTime() { - - return lifetime; - } - - public int getNumLevel() { - - return numLevel; - } - - public long getPrimaryKey() { - - return primaryKey; - } - - public List getProtocolList() { - - return protocolList; - } - - public String getRequestToken() { - - return requestToken; - } - - public Timestamp getTimeStamp() { - 
- return timeStamp; - } - - public int getStatus() { - - return status; - } - - public void setAllLevelRecursive(boolean b) { - - allLevelRecursive = b; - } - - public void setDeferredStartTime(int deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - } - - public void setDirOption(boolean b) { - - dirOption = b; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFileSize(long n) { - - filesize = n; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public void setNumLevel(int n) { - - numLevel = n; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) { - protocolList = l; - } - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public void setStatus(int n) { - - status = n; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer sulrUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(dirOption); - sb.append(" "); - sb.append(allLevelRecursive); - 
sb.append(" "); - sb.append(numLevel); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(filesize); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java deleted file mode 100644 index 0dedd963f..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java +++ /dev/null @@ -1,476 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs CopyChunkCatalog: it collects CopyChunkData and - * provides methods for looking up a CopyChunkData based on TRequestToken, as - * well as for updating an existing one. 
- * - * @author EGRID - ICTP Trieste - * @date september, 2005 - * @version 2.0 - */ -public class CopyChunkCatalog { - - private static final Logger log = LoggerFactory - .getLogger(CopyChunkCatalog.class); - - /* only instance of CopyChunkCatalog present in StoRM! */ - private static final CopyChunkCatalog cat = new CopyChunkCatalog(); - /* WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private CopyChunkDAO dao = CopyChunkDAO.getInstance(); - - private CopyChunkCatalog() { - - } - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static CopyChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved CopyChunkData. In case - * any error occurs, the operation does not proceed and no Exception is - * thrown. - * - * Beware that the only fields updated into persistence are the StatusCode and - * the errorString. - */ - synchronized public void update(CopyPersistentChunkData cd) { - - CopyChunkDataTO to = new CopyChunkDataTO(); - /* primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setLifeTime(FileLifetimeConverter.getInstance().toDB( - cd.getLifetime().value())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - cd.getFileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - cd.getOverwriteOption())); - to.setNormalizedSourceStFN(cd.getSURL().normalizedStFN()); - to.setSourceSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - to.setNormalizedTargetStFN(cd.getDestinationSURL().normalizedStFN()); - to.setTargetSurlUniqueID(new Integer(cd.getDestinationSURL().uniqueId())); - - dao.update(to); - } - - /** - * Method that returns a Collection of CopyChunkData Objects matching the - * supplied TRequestToken. 
- * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a CopyChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. - */ - synchronized public Collection lookup( - TRequestToken rt) { - - Collection chunkDataTOs = dao.find(rt); - log.debug("COPY CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs, rt); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs, TRequestToken rt) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - public Collection lookupCopyChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(requestToken, - surlsUniqueIDs, surlsArray); - return buildChunkDataList(chunkDataTOs, requestToken); - } - - public Collection lookupCopyChunkData(TSURL surl, - GridUserInterface user) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupCopyChunkData(TSURL surl) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupCopyChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - return buildChunkDataList(chunkDataTOs); - } - - public Collection lookupCopyChunkData( - List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray); - return buildChunkDataList(chunkDataTOs); - } - - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } 
catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Generates a CopyChunkData from the received CopyChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedSourceStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedSourceStFN()); - } - if (chunkDataTO.sourceSurlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.sourceSurlUniqueID().intValue()); - } - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(chunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedTargetStFN() != null) { - toSURL.setNormalizedStFN(chunkDataTO.normalizedTargetStFN()); - } - if (chunkDataTO.targetSurlUniqueID() != null) { - toSURL.setUniqueID(chunkDataTO.targetSurlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - lifeTime = TLifeTimeInSeconds.make(FileLifetimeConverter.getInstance() - .toStoRM(chunkDataTO.lifeTime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(chunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - log.error("\nTFileStorageType could not be translated from its String " - + "representation! String: {}", chunkDataTO.fileStorageType()); - // fail creation of PtPChunk! - fileStorageType = null; - } - // spaceToken! 
- // - // WARNING! Although this field is in common between StoRM and DPM, a - // converter is still used - // because DPM logic for NULL/EMPTY is not known. StoRM model does not - // allow for null, so it must - // be taken care of! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - // convert empty string representation of DPM into StoRM representation; - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(chunkDataTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! - TOverwriteMode globalOverwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(chunkDataTO.overwriteOption()); - if (globalOverwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be " - + "translated from its String representation! String: " - + chunkDataTO.overwriteOption()); - globalOverwriteOption = null; - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - // make CopyChunkData - CopyPersistentChunkData aux = null; - try { - aux = new CopyPersistentChunkData(rt, fromSURL, toSURL, lifeTime, - fileStorageType, spaceToken, globalOverwriteOption, status); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedCopyChunk(chunkDataTO); - log.warn("COPY CHUNK CATALOG! Retrieved malformed Copy" - + " chunk data from persistence. 
Dropping chunk from request: {}", rt); - log.warn(e.getMessage()); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * - * Adds to the received CopyChunkDataTO the normalized StFN and the SURL - * unique ID taken from the CopyChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedCopyChunkDataTO chunkTO, - final ReducedCopyChunkData chunk) { - - chunkTO.setNormalizedSourceStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSourceSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - chunkTO.setNormalizedTargetStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setTargetSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedCopyChunkDataTO from the received CopyChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedCopyChunkDataAttributesException - */ - private ReducedCopyChunkDataTO completeTO(CopyChunkDataTO chunkTO, - final CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedCopyChunkData from the data contained in the received - * CopyChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedCopyChunkData reduce(CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkData reducedChunk = new ReducedCopyChunkData( - chunk.getSURL(), chunk.getDestinationSURL(), chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedCopyChunkDataTO from the data contained in the received - * CopyChunkDataTO - * - * @param chunkTO - * @return - */ - 
private ReducedCopyChunkDataTO reduce(CopyChunkDataTO chunkTO) { - - ReducedCopyChunkDataTO reducedChunkTO = new ReducedCopyChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedSourceStFN(chunkTO.normalizedSourceStFN()); - reducedChunkTO.setSourceSurlUniqueID(chunkTO.sourceSurlUniqueID()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - reducedChunkTO.setNormalizedTargetStFN(chunkTO.normalizedTargetStFN()); - reducedChunkTO.setTargetSurlUniqueID(chunkTO.targetSurlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received CopyChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(CopyChunkDataTO chunkTO) { - - return (chunkTO.normalizedSourceStFN() != null) - && (chunkTO.sourceSurlUniqueID() != null && chunkTO - .normalizedTargetStFN() != null) - && (chunkTO.targetSurlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - @SuppressWarnings("unused") - private boolean isComplete(ReducedCopyChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedSourceStFN() != null) - && (reducedChunkTO.sourceSurlUniqueID() != null && reducedChunkTO - .normalizedTargetStFN() != null) - && (reducedChunkTO.targetSurlUniqueID() != null); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - 
int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java deleted file mode 100644 index 912acb9df..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java +++ /dev/null @@ -1,773 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author EGRID - ICTP Trieste - * @version 2.0 - * @date September 2005 - */ -public class CopyChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(CopyChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /* boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /* Singleton instance */ - private final static CopyChunkDAO dao = new CopyChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private CopyChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the CopyChunkDAO. 
- */ - public static CopyChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved CopyChunkDataTO, back - * into the MySQL DB. - * - * Only statusCode and explanation, of status_Copy table get written to the - * DB. Likewise for fileLifetime of request_queue table. - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. - */ - public synchronized void update(CopyChunkDataTO to) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=?, sc.explanation=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rc.normalized_sourceSURL_StFN=?, rc.sourceSURL_uniqueID=?, rc.normalized_targetSURL_StFN=?, rc.targetSURL_uniqueID=? 
" - + "WHERE rc.ID=?"); - logWarnings(con.getWarnings()); - - updateFileReq.setInt(1, to.status()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.errString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.lifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.fileStorageType()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.overwriteOption()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedSourceStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.sourceSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(8, to.normalizedTargetStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(9, to.targetSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(10, to.primaryKey()); - logWarnings(updateFileReq.getWarnings()); - - // run updateFileReq - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update! {}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedCopyChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Copy SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=?, normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedSourceStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.sourceSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setString(3, chunkTO.normalizedTargetStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, chunkTO.targetSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(5, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding CopyChunkDataTO - * objects. - * - * A complex query establishes all chunks associated with the request token, - * by properly joining request_queue, request_Copy and status_Copy. The - * considered fields are: - * - * (1) From status_Copy: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Copy: targetSURL and sourceSURL. - * - * (3) From request_queue: fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. - * - * In case of any error, a log gets written and an empty collection is - * returned. No exception is returned. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find( - TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? AND sc.statusCode<>?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - 
.getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - public synchronized Collection find( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. 
- * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. - */ - public synchronized void signalMalformedCopyChunk(CopyChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: signalMalformedCopyChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Copy SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_CopyID=" + auxTO.primaryKey(); - - PreparedStatement signal = null; - try { - /* update storm_put_filereq */ - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("CopyChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary private method that logs all SQL warnings. - */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Auxiliary method that sets up the conenction to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (SQLException | ClassNotFoundException e) { - log.error("COPY CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private synchronized boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("COPY CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a conenctin to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("COPY CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sc.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("COPY CHUNK DAO! No chunk of COPY request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("COPY CHUNK DAO! {} chunks of COPY requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("COPY CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns a String containing all Surl's IDs. 
- */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - for (int i = 0; i < n; i++) { - sb.append("'"); - sb.append(surls[i]); - sb.append("'"); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: 
surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - String str = "SELECT rq.r_token, rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, " - + "rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, " - + "rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("COPY CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - CopyChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = 
rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sc.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java deleted file mode 100644 index 1b455ac7a..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java +++ /dev/null @@ -1,277 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import java.sql.Timestamp; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the CopyChunkData proper, that is, String and primitive types. 
- * - * Each field is initialized with default values as per SRM 2.2 specification: - * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 2.0 - * @date Semptember 2005 - */ -public class CopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String toSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private Timestamp timeStamp = null; - - public CopyChunkDataTO() { - - fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.VOLATILE); - overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String 
normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - public int lifeTime() { - - return lifetime; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method used to set the FileStorageType: if s is null nothing gets set; the - * internal default String is the one relative to Volatile FileStorageType. 
- */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method used to set the OverwriteMode: if s is null nothing gets set; the - * internal default String is the one relative to Never OverwriteMode. - */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyData.java b/src/main/java/it/grid/storm/catalogs/CopyData.java deleted file mode 100644 index 103cdaf9e..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyData.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyData extends SurlMultyOperationRequestData { - - private static final Logger log = LoggerFactory.getLogger(CopyData.class); - - /** - * SURL to which the srmCopy will put the file - */ - protected TSURL destinationSURL; - - /** - * requested lifetime - BEWARE!!! It is the fileLifetime at destination in - * case of Volatile files! - */ - protected TLifeTimeInSeconds lifetime; - - /** - * TFileStorageType at destination - */ - protected TFileStorageType fileStorageType; - - /** - * SpaceToken to use for toSURL - */ - protected TSpaceToken spaceToken; - - /** - * specifies the behaviour in case of existing files for Put part of the copy - * (could be local or remote!) 
- */ - protected TOverwriteMode overwriteOption; - - public CopyData(TSURL fromSURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, status); - if (destinationSURL == null || lifetime == null || fileStorageType == null - || spaceToken == null || overwriteOption == null) { - throw new InvalidCopyDataAttributesException(fromSURL, destinationSURL, - lifetime, fileStorageType, spaceToken, overwriteOption, status); - } - this.destinationSURL = destinationSURL; - this.lifetime = lifetime; - this.fileStorageType = fileStorageType; - this.spaceToken = spaceToken; - this.overwriteOption = overwriteOption; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL getDestinationSURL() { - - return destinationSURL; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getLifetime() { - - return lifetime; - } - - /** - * Method that returns the fileStorageType for this chunk of the srm request. - */ - public TFileStorageType getFileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken getSpaceToken() { - - return spaceToken; - } - - /** - * Method that returns the overwriteOption specified in the srm request. - */ - public TOverwriteMode getOverwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, explanation); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java deleted file mode 100644 index a64729a19..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+RemoveSourceFiles global information - * for the whole request, and Flags in storm_req. 
- * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopyGlobalFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopyGlobalFlagConverter c = new CopyGlobalFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + DO NOT RemoveSourceFiles 1 ALWAYS + DO NOT RemoveSourceFiles 2 - * WHENFILESAREDIFFERENT + DO NOT RemoveSourceFiles 4 NEVER + - * RemoveSourceFiles 5 ALWAYS + RemoveSourceFiles 6 WHENFILESAREDIFFERENT + - * RemoveSourceFiles - */ - private CopyGlobalFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. - */ - public static CopyGlobalFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and removeSourceFiles boolean. -1 is returned if no match is - * found. 
- */ - public int toDPM(TOverwriteMode om, boolean removeSourceFiles) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(removeSourceFiles) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java deleted file mode 100644 index 419ff1515..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. 
It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyPersistentChunkData extends CopyData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(CopyPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer! - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private TRequestToken requestToken; - - public CopyPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, - TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyPersistentChunkDataAttributesException, - InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - if (requestToken == null) { - log.debug("CopyPersistentChunkData: requestToken is null!"); - throw new InvalidCopyPersistentChunkDataAttributesException(requestToken, - fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - } - this.requestToken = requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! 
- */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken getRequestToken() { - - return requestToken; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java deleted file mode 100644 index bd269f407..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+TDirOption request specific - * information, and Flags in storm_copy_filereq. 
- * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopySpecificFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopySpecificFlagConverter c = new CopySpecificFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + source NOT directory 1 ALWAYS + source NOT directory 2 - * WHENFILESAREDIFFERENT + source NOT directory 4 NEVER + source is directory - * 5 ALWAYS + source is directory 6 WHENFILESAREDIFFERENT + source is - * directory - */ - private CopySpecificFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of CopySpecificFlagConverter. - */ - public static CopySpecificFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and isSourceADirectory boolean. -1 is returned if no match - * is found. 
- */ - public int toDPM(TOverwriteMode om, boolean isSourceADirectory) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(isSourceADirectory) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * Boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java deleted file mode 100644 index 3627d68c6..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a pinLifetime as expressed by a - * TLifetimeInSeconds objects; in particular it takes care of protocol - * specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class FileLifetimeConverter { - - private static FileLifetimeConverter stc = new FileLifetimeConverter(); // only - // instance - - private FileLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static FileLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getFileLifetimeDefault() - * Configuration class method. - */ - public long toStoRM(int s) { - - if (s <= 0) - return Configuration.getInstance().getFileLifetimeDefault(); - return new Integer(s).longValue(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java deleted file mode 100644 index 0f8f81710..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB raw data and StoRM - * object model representation of TFileStorageType. 
- * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -class FileStorageTypeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static FileStorageTypeConverter c = new FileStorageTypeConverter(); - - /** - * Private constructor that fills in the conversion tables; - * - * V - VOLATILE P - PERMANENT D - DURABLE - */ - private FileStorageTypeConverter() { - - DBtoSTORM.put("V", TFileStorageType.VOLATILE); - DBtoSTORM.put("P", TFileStorageType.PERMANENT); - DBtoSTORM.put("D", TFileStorageType.DURABLE); - String aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of FileStorageTypeConverter. - */ - public static FileStorageTypeConverter getInstance() { - - return c; - } - - /** - * Method that returns the String used in the DB to represent the given - * TFileStorageType. The empty String "" is returned if no match is found. - */ - public String toDB(TFileStorageType fst) { - - String aux = (String) STORMtoDB.get(fst); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TFileStorageType used by StoRM to represent the - * supplied String representation in the DB. A configured default - * TFileStorageType is returned in case no corresponding StoRM type is found. - * TFileStorageType.EMPTY is returned if there are configuration errors. - */ - public TFileStorageType toSTORM(String s) { - - TFileStorageType aux = DBtoSTORM.get(s); - if (aux == null) - // This case is that the String s is different from V,P or D. - aux = DBtoSTORM.get(Configuration.getInstance() - .getDefaultFileStorageType()); - if (aux == null) - // This case should never happen, but in case we prefer ponder PERMANENT. 
- return TFileStorageType.EMPTY; - else - return aux; - } - - public String toString() { - - return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java deleted file mode 100644 index 6046d423d..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of BoLChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols, - * fileSize, status, transferURL. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ -public class InvalidBoLChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 5657310881067434280L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullLifeTime; - private boolean nullDirOption; - private boolean nullTransferProtocols; - private boolean nullFileSize; - private boolean nullStatus; - private boolean nullTransferURL; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidBoLChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullTransferProtocols = transferProtocols == null; - nullFileSize = fileSize == null; - nullStatus = status == null; - nullTransferURL = transferURL == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid BoLChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; nul-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-lifeTime="); - sb.append(nullLifeTime); - sb.append("; null-dirOption="); - sb.append(nullDirOption); - sb.append("; null-transferProtocols="); - sb.append(nullTransferProtocols); - sb.append("; null-fileSize="); - sb.append(nullFileSize); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-transferURL="); - sb.append(nullTransferURL); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java deleted file mode 100644 index 86d657c75..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of CopyChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromsURL, toSURL, lifetime, fileStorageType, - * spaceToken, overwriteOption, status. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class InvalidCopyChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 6786154038995023512L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullLifetime; - private boolean nullFileStorageType; - private boolean nullSpaceToken; - private boolean nullOverwriteOption; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidCopyChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TSURL toSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - nullStatus = status == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-lifetime="); - sb.append(nullLifetime); - sb.append("; null-filestorageType="); - sb.append(nullFileStorageType); - sb.append("; null-spaceToken="); - sb.append(nullSpaceToken); - sb.append("; null-overwriteOption="); - sb.append(nullOverwriteOption); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java deleted file mode 100644 index c31ed841a..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyDataAttributesException extends - InvalidSurlRequestDataAttributesException { - - private static final long serialVersionUID = -1217486426437414490L; - protected boolean nullDestinationSURL; - protected boolean nullLifetime; - protected boolean nullFileStorageType; - protected boolean nullSpaceToken; - protected boolean nullOverwriteOption; - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, status); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, status, message); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, status, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - 
TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, status, message, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - private void init(TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption) { - - nullDestinationSURL = destinationSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidCopyDataAttributesException [nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java deleted file mode 100644 index 4259b4db2..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyPersistentChunkDataAttributesException extends - InvalidCopyDataAttributesException { - - /** - * - */ - private static final long serialVersionUID = 1266996505954208061L; - private boolean nullRequestToken; - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, cause); - init(requestToken); - } - - public 
InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message, cause); - init(requestToken); - } - - private void init(TRequestToken requestToken) { - - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidCopyPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java deleted file mode 100644 index 65235db54..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exception thrown when the attributes supplied to the - * constructor of ReducedCopyChunkData are invalid, that is if any of the - * following is _null_: fromsURL, toSURL, status. - * - * @author Michele Dibenedetto - */ -@SuppressWarnings("serial") -public class InvalidReducedCopyChunkDataAttributesException extends Exception { - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidReducedCopyChunkDataAttributesException(TSURL fromSURL, - TSURL toSURL, TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java b/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java deleted file mode 100644 index ddce2846c..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * is asked to retrieve info from the persistence but the raw data is invalid - * and does not allow a well-formed domain obejcts to be created. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class InvalidRetrievedDataException extends Exception { - - private static final long serialVersionUID = -3645913441787012438L; - - private String requestToken; - private String requestType; - private int totalFilesInThisRequest; - private int numOfQueuedRequests; - private int numOfProgressing; - private int numFinished; - private boolean isSuspended; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidRetrievedDataException(String requestToken, String requestType, - int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished, boolean isSuspended) { - - this.requestToken = requestToken; - this.requestType = requestType; - this.totalFilesInThisRequest = totalFilesInThisRequest; - this.numOfQueuedRequests = numOfQueuedRequests; - this.numOfProgressing = numOfProgressingRequests; - this.numFinished = numFinished; - this.isSuspended = isSuspended; - } - - public String toString() { - - return "InvalidRetrievedDataException: token=" + requestToken + " type=" - + requestType + " total-files=" + totalFilesInThisRequest + " queued=" - + numOfQueuedRequests + " progressing=" + numOfProgressing + " finished=" - + numFinished + " isSusp=" + isSuspended; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/JiTData.java b/src/main/java/it/grid/storm/catalogs/JiTData.java deleted file mode 100644 index 4ef357735..000000000 --- a/src/main/java/it/grid/storm/catalogs/JiTData.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -/** - * Class that represents data associated to JiT entries. It contains a String - * representing the file, an int representing the ACL, an int representing the - * user UID, an int representing the user GID. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date November 2006 - */ -public class JiTData { - - private String file = ""; - private int uid = -1; - private int gid = -1; - private int acl = -1; - - /** - * Constructor requiring the complete name of the file as String, the acl as - * int, the uid and primary gid of the LocalUser bith as int. - */ - public JiTData(String file, int acl, int uid, int gid) { - - this.file = file; - this.acl = acl; - this.uid = uid; - this.gid = gid; - } - - public String pfn() { - - return file; - } - - public int acl() { - - return acl; - } - - public int uid() { - - return uid; - } - - public int gid() { - - return gid; - } - - public String toString() { - - return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java b/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java deleted file mode 100644 index fa03e3c3c..000000000 --- a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds more than one row of data for the specified request. 
- * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class MultipleDataEntriesException extends Exception { - - private static final long serialVersionUID = 427636739469695868L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. - */ - public MultipleDataEntriesException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "MultipleDataEntriesException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java b/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java deleted file mode 100644 index 548f0df9f..000000000 --- a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds no data for the specified request. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class NoDataFoundException extends Exception { - - private static final long serialVersionUID = -718255813130266566L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. 
- */ - public NoDataFoundException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "NoDataFoundException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java b/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java deleted file mode 100644 index 45ba54d1e..000000000 --- a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB and StoRM object - * model representation of TOverwriteMode. - * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -public class OverwriteModeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static OverwriteModeConverter c = new OverwriteModeConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DB - * uses String values to represent TOverwriteMode: - * - * N NEVER A ALWAYS D WHENFILESAREDIFFERENT - */ - private OverwriteModeConverter() { - - DBtoSTORM.put("N", TOverwriteMode.NEVER); - DBtoSTORM.put("A", TOverwriteMode.ALWAYS); - DBtoSTORM.put("D", TOverwriteMode.WHENFILESAREDIFFERENT); - Object aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. 
- */ - public static OverwriteModeConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode. "" is returned if no match is found. - */ - public String toDB(TOverwriteMode om) { - - String aux = (String) STORMtoDB.get(om); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TOverwriteMode used by StoRM to represent the - * supplied String representation of DPM. A configured default TOverwriteMode - * is returned in case no corresponding StoRM type is found. - * TOverwriteMode.EMPTY is returned if there are configuration errors. - */ - public TOverwriteMode toSTORM(String s) { - - TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s); - if (aux == null) - aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance() - .getDefaultOverwriteMode()); - if (aux == null) - return TOverwriteMode.EMPTY; - else - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java deleted file mode 100644 index 8a111afda..000000000 --- a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a TLifetimeInSeconds, in particular - * it takes care of protocol specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class PinLifetimeConverter { - - private static PinLifetimeConverter stc = new PinLifetimeConverter(); // only - // instance - - private PinLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static PinLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getPinLifetimeMinimum() - * Configuration class method. - */ - public long toStoRM(int s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return new Integer(s).longValue(); - } - - public long toStoRM(long s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java index 31723b38c..8f25ccd8a 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java @@ -4,18 +4,33 @@ */ package it.grid.storm.catalogs; -import it.grid.storm.common.types.SizeUnit; +import java.util.ArrayList; +import java.util.Collection; + +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.ReducedPtGChunkData; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; -import it.grid.storm.srm.types.InvalidTTURLAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -25,820 +40,340 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs 
PtGChunkCatalog: it collects PtGChunkData and - * provides methods for looking up a PtGChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. - * - * @author EGRID - ICTP Trieste - * @date April 26th, 2005 - * @version 4.0 - */ -@SuppressWarnings("unused") public class PtGChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtGChunkCatalog.class); - - /* Only instance of PtGChunkCatalog present in StoRM! */ - private static final PtGChunkCatalog cat = new PtGChunkCatalog(); - private final PtGChunkDAO dao = PtGChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! */ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! */ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private PtGChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_FILE_PINNED(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of PtGChunkCatalog available. - */ - public static PtGChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtGChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. 
- */ - synchronized public void update(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setFileSize(chunkData.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTurl(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - chunkData.getPinLifeTime().value())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Refresh method. THIS IS A WORK IN PROGRESS!!!! This method have to synch - * the ChunkData information with the database status intended as the status - * code and the TURL - * - * @param auxTO - * @param PtGChunkData - * inputChunk - * @return PtGChunkData outputChunk - */ - synchronized public PtGPersistentChunkData refreshStatus( - PtGPersistentChunkData inputChunk) { - - PtGChunkDataTO chunkDataTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkDataTO); - if (chunkDataTO == null) { - log.warn("PtG CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. 
Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - inputChunk.setStatus(status); - TTURL turl = null; - try { - turl = TTURL.makeFromString(chunkDataTO.turl()); - } catch (InvalidTTURLAttributesException e) { - log.info("PtGChunkCatalog (FALSE-ERROR-in-abort-refresh-status?):" - + " built a TURL with protocol NULL (retrieved from the DB..)"); - } - inputChunk.setTransferURL(turl); - return inputChunk; - } - - /** - * Method that returns a Collection of PtGChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a PtGChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); - ArrayList list = new ArrayList(); - if (chunkTOs.isEmpty()) { - log.warn("PtG CHUNK CATALOG! No chunks found in persistence for " - + "specified request: {}", rt); - return list; - } - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("PtG CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a PtGChunkData from the received PtGChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); - } - if (chunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - chunkDataTO.lifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed." - + " Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(chunkDataTO.dirOption(), - chunkDataTO.allLevelRecursive(), chunkDataTO.numLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter - .toSTORM(chunkDataTO.protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or could " - + "not translate TransferProtocols!"); - /* fail construction of PtGChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (chunkDataTO.vomsAttributes() != null - && !chunkDataTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), - chunkDataTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation." - + " IllegalArgumentException: {}", e.getMessage(), e); - } - // transferURL - /* - * whatever is read is just meaningless because PtG will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtGChunkData - PtGPersistentChunkData aux = null; - try { - aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, - dirOption, transferProtocols, fileSize, status, transferURL); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtGChunk(chunkDataTO); - log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " - + "persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtGChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtGChunkDataTO chunkTO, - final ReducedPtGChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, - final PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtGChunkData from the data contained in the received - * PtGChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtGChunkDataTO from the data contained in the received - * PtGChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { - - ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtGChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtGChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtGChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedPtGChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("PtG CHUNK CATALOG! 
No chunks found in persistence for {}", rt); - } else { - ReducedPtGChunkData reducedChunkData = null; - for (ReducedPtGChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedPtGChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupPtGChunkData(TSURL surl) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl })); - } - - public Collection lookupPtGChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(List 
surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: ", e.getMessage()); - } - } - return list; - } - - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. If any of the data - * retrieved for a given chunk is not well formed and so does not allow a - * ReducedPtGChunkData Object to be created, then that chunk is dropped and - * gets logged, while processing continues with the next one. All valid chunks - * get returned: the others get dropped. 
If there are no chunks associated to - * the given GridUser and Collection of TSURLs, then an empty Collection is - * returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtGChunkData reducedChunkData; - for (ReducedPtGChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}",list); - return list; - } - - /** - * - * - * @param reducedChunkDataTO - * @return - */ - private ReducedPtGChunkData makeOneReduced( - ReducedPtGChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } 
- // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedPtGChunkData - ReducedPtGChunkData aux = null; - try { - aux = new ReducedPtGChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! Retrieved malformed Reduced PtG chunk " - + "data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive PtG request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. - */ - synchronized public void addChild(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* needed for now to find ID of request! Must be changed soon! 
*/ - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - /* add the entry and update the Primary Key field! */ - dao.addChild(to); - /* set the assigned PrimaryKey! */ - chunkData.setPrimaryKey(to.primaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmPtG. The only fields from PtGChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messagges. 
- */ - synchronized public void add(PtGPersistentChunkData chunkData, - GridUserInterface gu) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getPinLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - - dao.addNew(to, gu.getDn()); // add the entry and update the Primary Key - // field! - chunkData.setPrimaryKey(to.primaryKey()); // set the assigned PrimaryKey! - } - - /** - * Method used to establish if in Persistence there is a PtGChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_FILE_PINNED(surl.uniqueId()) > 0); - - } - - /** - * Method used to transit the specified Collection of ReducedPtGChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. 
In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. - */ - synchronized public void transitSRM_FILE_PINNEDtoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - - } - dao.transitSRM_FILE_PINNEDtoSRM_RELEASED(primaryKeys, token); - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all PtG Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! - */ - synchronized public void transitExpiredSRM_FILE_PINNED() { - - List expiredSurls = dao.transitExpiredSRM_FILE_PINNED(); - } - - public void updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public void updateFromPreviousStatus(TSURL surl, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, expectedStatusCode, newStatusCode, - explanation); - - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode 
newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtGChunkCatalog.class); + + private static PtGChunkCatalog instance; + + public static synchronized PtGChunkCatalog getInstance() { + if (instance == null) { + instance = new PtGChunkCatalog(); + } + return instance; + } + + private final PtGChunkDAO dao; + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private PtGChunkCatalog() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtGChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setFileSize(chunkData.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTurl(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(chunkData.getPinLifeTime().value())); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtGChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtGChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a messagge gets + * logged. + */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); + ArrayList list = new ArrayList(); + if (chunkTOs.isEmpty()) { + log.warn("PtG CHUNK CATALOG! 
No chunks found in persistence for " + "specified request: {}", + rt); + return list; + } + PtGPersistentChunkData chunk; + for (PtGChunkDataTO chunkTO : chunkTOs) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(this.completeTO(chunkTO, chunk)); + } catch (InvalidReducedPtGChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("PtG CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a PtGChunkData from the received PtGChunkDataTO + * + * @param chunkDataTO + * @param rt + * @return + */ + private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (chunkDataTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); + } + if (chunkDataTO.surlUniqueID() != null) { + fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(chunkDataTO.lifeTime()); + // Check for max value allowed + long max = StormConfiguration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed." 
+ + " Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = new TDirOption(chunkDataTO.dirOption(), chunkDataTO.allLevelRecursive(), + chunkDataTO.numLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = + TransferProtocolListConverter.toSTORM(chunkDataTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or could " + "not translate TransferProtocols!"); + /* fail construction of PtGChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(chunkDataTO.fileSize()); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + chunkDataTO.status()); + } else { + status = new TReturnStatus(code, chunkDataTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (chunkDataTO.vomsAttributes() != null && !chunkDataTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), + chunkDataTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation." + " IllegalArgumentException: {}", + e.getMessage(), e); + } + // transferURL + /* + * whatever is read is just meaningless because PtG will fill it in!!! 
So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtGChunkData + PtGPersistentChunkData aux = null; + try { + aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, dirOption, + transferProtocols, fileSize, status, transferURL); + aux.setPrimaryKey(chunkDataTO.primaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(chunkDataTO); + log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " + + "persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtGChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtGChunkDataTO chunkTO, final ReducedPtGChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, + final PtGPersistentChunkData chunk) throws InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtGChunkData from the data contained in the received PtGChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) + throws 
InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedPtGChunkDataTO from the data contained in the received PtGChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { + + ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setFromSURL(chunkTO.fromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtGChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtGChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + /** + * Method used to add into Persistence a new entry. The supplied PtGChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive PtG request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlaying DAO. 
+ */ + synchronized public void addChild(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* needed for now to find ID of request! Must be changed soon! */ + to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + /* add the entry and update the Primary Key field! */ + dao.addChild(to); + /* set the assigned PrimaryKey! 
*/ + chunkData.setPrimaryKey(to.primaryKey()); + } + + public void updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, new String[] {surl.rawSurl()}, + statusCode, explanation); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java deleted file mode 100644 index 78a837bfa..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java +++ /dev/null @@ -1,1765 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import 
java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. - * - * @author EGRID ICTP - * @version 3.0 - * @date June 2005 - */ -public class PtGChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtGChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /** Singleton instance */ - private final static PtGChunkDAO dao = new PtGChunkDAO(); - - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! 
- */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private PtGChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtGChunkDAO. - */ - public static PtGChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. - * - * The supplied PtGChunkData is used to fill in only the DB table where file - * specific info gets recorded: it does _not_ add a new request! So if - * spurious data is supplied, it will just stay there because of a lack of a - * parent request! - */ - public synchronized void addChild(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - try { - - // WARNING!!!! We are forced to run a query to get the ID of the request, - // which should NOT be so - // because the corresponding request object should have been changed with - // the extra field! However, it is not possible - // at the moment to perform such chage because of strict deadline and the - // change could wreak havoc - // the code. So we are forced to make this query!!! 
- - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - id.setString(1, to.requestToken()); - printWarnings(id.getWarnings()); - - log.debug("PTG CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - printWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillPtGTables(to, request_id); - - /* end transaction! */ - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. - * - * The supplied PtGChunkData is used to fill in all the DB tables where file - * specific info gets recorded: it _adds_ a new request! - */ - public synchronized void addNew(PtGChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. 
*/ - PreparedStatement addProtocols = null; - try { - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // add to request_queue... - str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) VALUES (?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - /* Request type set to prepare to get! */ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.PREPARE_TO_GET)); - printWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - printWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.lifeTime()); - printWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - printWarnings(addNew.getWarnings()); - - addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); - printWarnings(addNew.getWarnings()); - - addNew.setString(6, to.requestToken()); - printWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - printWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - printWarnings(addNew.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - printWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... 
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - printWarnings(con.getWarnings()); - for (Iterator i = to.protocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - printWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - printWarnings(addProtocols.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - printWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillPtGTables(to, id_new); - - // end transaction! - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addNew! 
" - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillPtGTables(PtGChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_g = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Get for request */ - PreparedStatement addGet = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.dirOption()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.allLevelRecursive()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.numLevel()); - printWarnings(addDirOption.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - printWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_Get... sourceSURL and TDirOption! 
- str = "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addGet = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addGet.setInt(1, id_do); - printWarnings(addGet.getWarnings()); - - addGet.setInt(2, requestQueueID); - printWarnings(addGet.getWarnings()); - - addGet.setString(3, to.fromSURL()); - printWarnings(addGet.getWarnings()); - - addGet.setString(4, to.normalizedStFN()); - printWarnings(addGet.getWarnings()); - - addGet.setInt(5, to.surlUniqueID()); - printWarnings(addGet.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addGet.toString()); - addGet.execute(); - printWarnings(addGet.getWarnings()); - - rs_g = addGet.getGeneratedKeys(); - int id_g = extractID(rs_g); - - // third fill in status_Get... - str = "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - printWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.status()); - printWarnings(addChild.getWarnings()); - - addChild.setString(3, to.errString()); - printWarnings(addChild.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addChild.toString()); - addChild.execute(); - printWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_g); - close(rs_s); - close(addDirOption); - close(addGet); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved PtGChunkDataTO, back - * into the MySQL DB. - * - * Only the fileSize, transferURL, statusCode and explanation, of status_Get - * table are written to the DB. Likewise for the request pinLifetime. - * - * In case of any error, an error message gets logged but no exception is - * thrown. 
- */ - public synchronized void update(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " - + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"); - printWarnings(con.getWarnings()); - - updateFileReq.setLong(1, to.fileSize()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.turl()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.status()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.errString()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(5, to.lifeTime()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedStFN()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.surlUniqueID()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(8, to.primaryKey()); - printWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("PTG CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - printWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update! 
{}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! - * - * Method used to refresh the PtGChunkDataTO information from the MySQL DB. - * - * In this first version, only the statusCode and the TURL are reloaded from - * the DB. TODO The next version must contains all the information related to - * the Chunk! - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. 
- */ - - public synchronized PtGChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String queryString = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - queryString = "SELECT sg.statusCode, sg.transferURL " - + "FROM status_Get sg " + "WHERE sg.request_GetID=?"; - find = con.prepareStatement(queryString); - printWarnings(con.getWarnings()); - find.setLong(1, primary_key); - printWarnings(find.getWarnings()); - log.trace("PTG CHUNK DAO: refresh status method; {}", find.toString()); - - rs = find.executeQuery(); - - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - // The result shoul be un - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setTurl(rs.getString("sg.transferURL")); - } - return chunkDataTO; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return null TransferObject! */ - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtGChunkDataTO - * objects. - * - * An initial simple query establishes the list of protocols associated with - * the request. A second complex query establishes all chunks associated with - * the request, by properly joining request_queue, request_Get, status_Get and - * request_DirOption. The considered fields are: - * - * (1) From status_Get: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Get: sourceSURL - * - * (3) From request_queue: pinLifetime - * - * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, - * numOfLevels - * - * In case of any error, a log gets written and an empty collection is - * returned. 
No exception is thrown. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " - + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sg.statusCode<>?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - PtGChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: ", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given TRequestToken expressed as String. - */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtGChunkDataTO(); - reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surlsArray) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " ) "; - - find = con.prepareStatement(str); - - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_get that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.client_dn=? 
AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. - * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * see its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. 
- */ - public synchronized void signalMalformedPtGChunk(PtGChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: signalMalformedPtGChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Get SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_GetID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PTG CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Get requests on the given SURL, that are - * in SRM_FILE_PINNED state. - * - * This method is intended to be used by PtGChunkCatalog in the - * isSRM_FILE_PINNED method invocation. - * - * In case of any error, 0 is returned. - */ - // request_Get table - public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: numberInSRM_FILE_PINNED - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rg.ID) " - + "FROM status_Get sg JOIN request_Get rg " - + "ON (sg.request_GetID=rg.ID) " - + "WHERE rg.sourceSURL_uniqueID=? AND sg.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - int numberFilePinned = 0; - if (rs.next()) { - numberFilePinned = rs.getInt(1); - } - return numberFilePinned; - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " - + "Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that updates all expired requests in SRM_FILE_PINNED state, into - * SRM_RELEASED. - * - * This is needed when the client forgets to invoke srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_FILE_PINNED() { - - // tring to the surl unique ID - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitExpiredSRM_FILE_PINNED - unable to get a valid connection!"); - return new ArrayList(); - } - HashMap expiredSurlMap = new HashMap(); - String str = null; - // Statement statement = null; - PreparedStatement preparedStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " - + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log - .trace("PtGChunkDAO! No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* Update status of all expired surls to SRM_RELEASED */ - - preparedStatement = null; - try { - - str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(preparedStatement.getWarnings()); - - preparedStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(preparedStatement.getWarnings()); - - log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", - preparedStatement.toString()); - - int count = preparedStatement.executeUpdate(); - printWarnings(preparedStatement.getWarnings()); - - if (count == 0) { - log.trace("PtGChunkDAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" - + " SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to transit expired SRM_FILE_PINNED chunks " - + "of PtG requests, to SRM_RELEASED! {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG and BoL */ - - HashSet pinnedSurlSet = new HashSet(); - try { - - // SURLs pinned by PtGs - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. " - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(preparedStatement); - - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); - } - } - pinnedSurlSet.add(uniqueID); - } - commit(con); - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(preparedStatement); - } - - ArrayList expiredSurlList = new ArrayList(); - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. {}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage(), e); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. - * An array of long representing the primary key of each chunk is required: - * only they get the status changed provided their current status is - * SRM_FILE_PINNED. - * - * This method is used during srmReleaseFiles - * - * In case of any error nothing happens and no exception is thrown, but proper - * messagges get logged. - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " - + "from SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks" - + " from SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * @param ids - * @param token - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); - return; - } - - /* - * If a request token has been specified, only the related Get requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - " - + "unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" - + token.toString() + "' AND rg.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was" - + " transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " - + "SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks from " - + "SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatus(TRequestToken requestToken, - int[] surlUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" - + requestToken.toString() + "' AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, (explanation != null ? 
explanation : "")); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", - statusCode); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were updated to {}.", - count, statusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || surlsUniqueIDs.length != surls.length) { - - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - - doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, 
int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sg.statusCode=? 
"; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("PtG, SQL Exception: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("PTG CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("PTG CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("PTG CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } else { - log.error("PTG CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception("PTG CHUNK DAO! It was not possible to" - + " establish the assigned autoincrement primary key!"); - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. 
- */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTG CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTG CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTG CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT rq.ID, rq.r_token, sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, " - + "rq.client_dn, rq.proxy, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, " - + "d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get 
rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("PTG CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sg.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java index df12c1e1b..7b7e42ec7 100644 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java @@ -4,13 +4,41 @@ */ package it.grid.storm.catalogs; -import it.grid.storm.common.types.SizeUnit; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import 
it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.FileLifetimeConverter; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.SizeInBytesIntConverter; +import it.grid.storm.persistence.converter.SpaceTokenStringConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -26,575 +54,412 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs PtPChunkCatalog: it collects 
PtPChunkData and - * provides methods for looking up a PtPChunkData based on TRequestToken, as - * well as for updating data into persistence. Methods are also supplied to - * evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit expired - * SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 3.0 - */ public class PtPChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtPChunkCatalog.class); - - /* only instance of PtPChunkCatalog present in StoRM! */ - private static final PtPChunkCatalog cat = new PtPChunkCatalog(); - private final PtPChunkDAO dao = PtPChunkDAO.getInstance(); - - private PtPChunkCatalog() {} - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static PtPChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtPChunkData. - */ - synchronized public void update(PtPPersistentChunkData chunkData) { - - PtPChunkDataTO to = new PtPChunkDataTO(); - /* rimary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTransferURL(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setPinLifetime(PinLifetimeConverter.getInstance().toDB( - chunkData.pinLifetime().value())); - to.setFileLifetime(FileLifetimeConverter.getInstance().toDB( - chunkData.fileLifetime().value())); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - chunkData.fileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - chunkData.overwriteOption())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - 
to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Method that returns a Collection of PtPChunkData Objects matching the - * supplied TRequestToken. If any of the data associated to the TRequestToken - * is not well formed and so does not allow a PtPChunkData Object to be - * created, then that part of the request is dropped, gets logged and an - * attempt is made to write in the DB that the chunk was malformed; the - * processing continues with the next part. Only the valid chunks get - * returned. If there are no chunks to process then an empty Collection is - * returned, and a messagge gets logged. NOTE! Chunks in SRM_ABORTED status - * are NOT returned! This is imporant because this method is intended to be - * used by the Feeders to fetch all chunks in the request, and aborted chunks - * should not be picked up for processing! - */ - synchronized public Collection lookup( - final TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); - return buildChunkDataList(chunkTOs); - } - - /** - * Private method used to create a PtPChunkData object, from a PtPChunkDataTO - * and TRequestToken. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the chunk is - * malformed. 
- */ - private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.surlUniqueID() != null) { - toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); - } - // pinLifetime - TLifeTimeInSeconds pinLifetime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.pinLifetime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. Drop the " - + "value to the max = {} seconds", max); - pinLifeTime = max; - } - pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(auxTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be translated from " - + "its String representation! String: " + auxTO.fileStorageType()); - // Use the default value defined in Configuration. 
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb.append("\nUsed the default TFileStorageType as defined " - + "in StoRM config.: " + fileStorageType); - } - // expectedFileSize - // - // WARNING! A converter is used because the DB uses 0 for empty, whereas - // StoRM object model does allow a 0 size! Since this is an optional - // field - // in the SRM specs, null must be converted explicitly to Empty - // TSizeInBytes - // because it is indeed well formed! - TSizeInBytes expectedFileSize = null; - TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); - long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM( - auxTO.expectedFileSize()); - if (emptySize.value() == sizeTranslation) { - expectedFileSize = emptySize; - } else { - try { - expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), - SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // spaceToken! - // - // WARNING! A converter is still needed because of DB logic for missing - // SpaceToken makes use of NULL, whereas StoRM object model does not - // allow - // for null! It makes use of a specific Empty type. - // - // Indeed, the SpaceToken field is optional, so a request with a null - // value - // for the SpaceToken field in the DB, _is_ well formed! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - /** - * convert empty string representation of DPM into StoRM representation; - */ - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(auxTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! 
- TOverwriteMode overwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(auxTO.overwriteOption()); - if (overwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be translated " - + "from its String representation! String: " + auxTO.overwriteOption()); - overwriteOption = null; - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols " - + "or could not translate TransferProtocols!"); - transferProtocols = null; // fail construction of PtPChunkData! - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance() - .toSTORM(auxTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.status()); - } else { - status = new TReturnStatus(code, auxTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (auxTO.vomsAttributes() != null - && !auxTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), - auxTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - - // transferURL - /** - * whatever is read is just meaningless because PtP will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! 
- */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtPChunkData - PtPPersistentChunkData aux = null; - try { - aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, - fileLifetime, fileStorageType, spaceToken, expectedFileSize, - transferProtocols, overwriteOption, status, transferURL); - aux.setPrimaryKey(auxTO.primaryKey()); - } catch (InvalidPtPPersistentChunkDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidPtPDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidFileTransferDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtPChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtPChunkDataTO chunkTO, - final ReducedPtPChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, - final PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtPChunkData from the data contained in the received - * PtPChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), - chunk.getStatus(), chunk.fileStorageType(), chunk.fileLifetime()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtPChunkDataTO from the data contained in the received - * PtPChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { - - ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - 
reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtPChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtPChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtPChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - public Collection lookupReducedPtPChunkData( - TRequestToken requestToken, Collection surls) { - - Collection reducedChunkDataTOs = dao.findReduced( - requestToken.getValue(), surls); - log.debug("PtP CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - return buildReducedChunkDataList(reducedChunkDataTOs); - } - - public Collection lookupPtPChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtPChunkData( - (List) Arrays.asList(new TSURL[] { surl }), user); - } - - private Collection lookupPtPChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new 
ArrayList(); - PtPPersistentChunkData chunk; - for (PtPChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("PtPChunkCatalog: returning {}\n\n", list); - return list; - } - - private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtPChunkData reducedChunkData; - for (ReducedPtPChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtP CHUNK CATALOG: returning {}", list); - return list; - } - - private ReducedPtPChunkData makeOneReduced( - ReducedPtPChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - 
toSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(reducedChunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be " - + "translated from its String representation! String: " - + reducedChunkDataTO.fileStorageType()); - // Use the default value defined in Configuration. - fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb - .append("\nUsed the default TFileStorageType as defined in StoRM config.: " - + fileStorageType); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(reducedChunkDataTO.fileLifetime()), - TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // make ReducedPtPChunkData - ReducedPtPChunkData aux = null; - try { - aux = new ReducedPtPChunkData(toSURL, status, fileStorageType, - fileLifetime); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtP CHUNK CATALOG! Retrieved malformed Reduced PtP" - + " chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - public int updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - return dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtPChunkCatalog.class); + + private static PtPChunkCatalog instance; + + public static synchronized PtPChunkCatalog getInstance() { + if (instance == null) { + instance = new PtPChunkCatalog(); + } + return instance; + } + + private final PtPChunkDAO dao; + + private PtPChunkCatalog() { + dao = PtPChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtPChunkData. 
+ */ + public synchronized void update(PtPPersistentChunkData chunkData) { + + PtPChunkDataTO to = new PtPChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTransferURL(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(chunkData.pinLifetime().value())); + to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(chunkData.fileLifetime().value())); + to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(chunkData.fileStorageType())); + to.setOverwriteOption(OverwriteModeConverter.toDB(chunkData.overwriteOption())); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(Integer.valueOf(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtPChunkData Objects matching the supplied TRequestToken. + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtPChunkData Object to be created, then that part of the request is dropped, gets logged and an + * attempt is made to write in the DB that the chunk was malformed; the processing continues with + * the next part. Only the valid chunks get returned. If there are no chunks to process then an + * empty Collection is returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status are + * NOT returned! 
This is important because this method is intended to be used by the Feeders to + * fetch all chunks in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection lookup(final TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); + return buildChunkDataList(chunkTOs); + } + + /** + * Private method used to create a PtPChunkData object, from a PtPChunkDataTO and TRequestToken. + * If a chunk cannot be created, an error messagge gets logged and an attempt is made to signal in + * the DB that the chunk is malformed. + */ + private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + // toSURL + TSURL toSURL = null; + try { + toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + toSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.surlUniqueID() != null) { + toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); + } + // pinLifetime + TLifeTimeInSeconds pinLifetime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.pinLifetime()); + // Check for max value allowed + long max = StormConfiguration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
Drop the " + + "value to the max = {} seconds", max); + pinLifeTime = max; + } + pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileLifetime + TLifeTimeInSeconds fileLifetime = null; + try { + fileLifetime = TLifeTimeInSeconds + .make(FileLifetimeConverter.getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileStorageType + TFileStorageType fileStorageType = + FileStorageTypeConverter.getInstance().toSTORM(auxTO.fileStorageType()); + if (fileStorageType == TFileStorageType.EMPTY) { + errorSb.append("\nTFileStorageType could not be translated from " + + "its String representation! String: " + auxTO.fileStorageType()); + // Use the default value defined in Configuration. + fileStorageType = TFileStorageType + .getTFileStorageType(StormConfiguration.getInstance().getDefaultFileStorageType()); + errorSb.append("\nUsed the default TFileStorageType as defined " + "in StoRM config.: " + + fileStorageType); + } + // expectedFileSize + // + // WARNING! A converter is used because the DB uses 0 for empty, whereas + // StoRM object model does allow a 0 size! Since this is an optional + // field + // in the SRM specs, null must be converted explicitly to Empty + // TSizeInBytes + // because it is indeed well formed! + TSizeInBytes expectedFileSize = null; + TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); + long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(auxTO.expectedFileSize()); + if (emptySize.value() == sizeTranslation) { + expectedFileSize = emptySize; + } else { + try { + expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize()); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // spaceToken! + // + // WARNING! 
A converter is still needed because of DB logic for missing + // SpaceToken makes use of NULL, whereas StoRM object model does not + // allow + // for null! It makes use of a specific Empty type. + // + // Indeed, the SpaceToken field is optional, so a request with a null + // value + // for the SpaceToken field in the DB, _is_ well formed! + TSpaceToken spaceToken = null; + TSpaceToken emptyToken = TSpaceToken.makeEmpty(); + /** + * convert empty string representation of DPM into StoRM representation; + */ + String spaceTokenTranslation = SpaceTokenStringConverter.toStoRM(auxTO.spaceToken()); + if (emptyToken.toString().equals(spaceTokenTranslation)) { + spaceToken = emptyToken; + } else { + try { + spaceToken = TSpaceToken.make(spaceTokenTranslation); + } catch (InvalidTSpaceTokenAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // overwriteOption! + TOverwriteMode overwriteOption = OverwriteModeConverter.toSTORM(auxTO.overwriteOption()); + if (overwriteOption == TOverwriteMode.EMPTY) { + errorSb.append("\nTOverwriteMode could not be translated " + + "from its String representation! String: " + auxTO.overwriteOption()); + overwriteOption = null; + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols " + "or could not translate TransferProtocols!"); + transferProtocols = null; // fail construction of PtPChunkData! 
+ } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.status()); + } else { + status = new TReturnStatus(code, auxTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (auxTO.vomsAttributes() != null && !auxTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), auxTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + + // transferURL + /** + * whatever is read is just meaningless because PtP will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtPChunkData + PtPPersistentChunkData aux = null; + try { + aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, fileLifetime, + fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status, + transferURL); + aux.setPrimaryKey(auxTO.primaryKey()); + } catch (InvalidPtPPersistentChunkDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidPtPDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidFileTransferDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! 
Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtPChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtPChunkDataTO chunkTO, final ReducedPtPChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(Integer.valueOf(chunk.toSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, + final PtPPersistentChunkData chunk) throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtPChunkData from the data contained in the received PtPChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) + throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), chunk.getStatus(), + chunk.fileStorageType(), chunk.fileLifetime()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return 
reducedChunk; + } + + /** + * Creates a ReducedPtPChunkDataTO from the data contained in the received PtPChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { + + ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setToSURL(chunkTO.toSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtPChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtPChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + public Collection lookupPtPChunkData(TSURL surl, GridUserInterface user) { + + return lookupPtPChunkData((List) Arrays.asList(new TSURL[] {surl}), user); + } + + private Collection lookupPtPChunkData(List surls, + GridUserInterface user) { + + int[] surlsUniqueIDs = new int[surls.size()]; + String[] surlsArray = new String[surls.size()]; + int index = 0; + for (TSURL tsurl : surls) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surlsArray[index] = tsurl.rawSurl(); + index++; + } + Collection chunkDataTOs = dao.find(surlsUniqueIDs, surlsArray, user.getDn()); + log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); + return buildChunkDataList(chunkDataTOs); + } + + private Collection buildChunkDataList( + Collection chunkDataTOs) { + + Collection list = Lists.newArrayList(); + PtPPersistentChunkData chunk; + for (PtPChunkDataTO chunkTO : chunkDataTOs) { + chunk = makeOne(chunkTO); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, 
chunk)); + } catch (InvalidReducedPtPChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on " + "DB to the request: {}", + e.getMessage()); + } + } + log.debug("PtPChunkCatalog: returning {}\n\n", list); + return list; + } + + private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { + + try { + return makeOne(chunkTO, new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); + } catch (InvalidTRequestTokenAttributesException e) { + throw new IllegalStateException( + "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " + e); + } + } + + public int updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + return dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, + new String[] {surl.rawSurl()}, statusCode, explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, List surlList, + TStatusCode expectedStatusCode, TStatusCode newStatusCode) { + + int[] surlsUniqueIDs = new int[surlList.size()]; + String[] surls = new String[surlList.size()]; + int index = 0; + for (TSURL tsurl : surlList) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surls[index] = tsurl.rawSurl(); + index++; + } + return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java deleted file mode 100644 index 388c7853a..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java +++ /dev/null @@ -1,1670 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; -import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtPChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - private static final PtPChunkDAO dao = new PtPChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed - */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - /* boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - private StatusCodeConverter statusCodeConverter = StatusCodeConverter.getInstance(); - - private PtPChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtPChunkDAO. 
- */ - public static PtPChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved PtPChunkDataTO, back - * into the MySQL DB. Only the transferURL, statusCode and explanation, of - * status_Put table get written to the DB. Likewise for the pinLifetime and - * fileLifetime of request_queue. In case of any error, an error messagge gets - * logged but no exception is thrown. - */ - public synchronized void update(PtPChunkDataTO to) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updatePut = null; - try { - // prepare statement... - updatePut = con - .prepareStatement("UPDATE " - + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " - + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? 
" - + "WHERE rp.ID=?"); - printWarnings(con.getWarnings()); - - updatePut.setString(1, to.transferURL()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(2, to.status()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(3, to.errString()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(4, to.pinLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(5, to.fileLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(6, to.fileStorageType()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(7, to.overwriteOption()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(8, to.normalizedStFN()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(9, to.surlUniqueID()); - printWarnings(updatePut.getWarnings()); - - updatePut.setLong(10, to.primaryKey()); - printWarnings(updatePut.getWarnings()); - // run updateStatusPut... - log.trace("PtP CHUNK DAO - update method: {}", updatePut); - updatePut.executeUpdate(); - printWarnings(updatePut.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); - } finally { - close(updatePut); - } - } - - /** - * Updates the request_Put represented by the received ReducedPtPChunkDataTO - * by setting its normalized_targetSURL_StFN and targetSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to refresh the PtPChunkDataTO information from the MySQL DB. - * This method is intended to be used during the srmAbortRequest/File - * operation. In case of any error, an error message gets logged but no - * exception is thrown; a null PtPChunkDataTO is returned. - */ - public synchronized PtPChunkDataTO refresh(long id) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String prot = "SELECT tp.config_ProtocolsID FROM request_TransferProtocols tp " - + "WHERE tp.request_queueID IN " - + "(SELECT rp.request_queueID FROM request_Put rp WHERE rp.ID=?)"; - - String refresh = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.r_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode, sp.transferURL " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID=?"; - - PreparedStatement stmt = null; - ResultSet rs = null; - PtPChunkDataTO chunkDataTO = null; - - try { - // get protocols for the request - stmt = con.prepareStatement(prot); - printWarnings(con.getWarnings()); - - 
List protocols = Lists.newArrayList(); - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(stmt); - - // get chunk of the request - stmt = con.prepareStatement(refresh); - printWarnings(con.getWarnings()); - - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - if (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - chunkDataTO.setTransferURL(rs.getString("sp.transferURL")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. 
The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - if (rs.next()) { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but found " - + "more than one such chunks!", id); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but chunk " - + "NOT found in persistence!", id); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to refresh chunk! {}", e.getMessage(), e); - chunkDataTO = null; - } finally { - close(rs); - close(stmt); - } - return chunkDataTO; - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtPChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, request_Put - * and status_Put. The considered fields are: (1) From status_Put: the ID - * field which becomes the TOs primary key, and statusCode. (2) From - * request_Put: targetSURL and expectedFileSize. (3) From request_queue: - * pinLifetime, fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. In case of any error, a log gets written and an empty - * collection is returned. No exception is returned. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
This is important because this method - * is intended to be used by the Feeders to fetch all chunks in the request, - * and aborted chunks should not be picked up for processing! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return null; - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=? 
AND sp.statusCode<>?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken, Collection surls) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - boolean addInClause = surls != null && !surls.isEmpty(); - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=?"; - if (addInClause) { - str += " AND rp.targetSURL_uniqueID IN ("; - for (int i=0; i list = Lists.newArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - if (addInClause) { - Iterator iterator = surls.iterator(); - int start = 2; - while (iterator.hasNext()) { - TSURL surl = iterator.next(); - find.setInt(start++, surl.uniqueId()); - } - } - printWarnings(find.getWarnings()); - log.trace("PtP CHUNK DAO! 
findReduced with request token; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO corresponding to - * the IDs supplied in the given List of Long. If the List is null or empty, - * an empty collection is returned and error messages get logged. 
- */ - public synchronized Collection findReduced( - List ids) { - - if (ids != null && !ids.isEmpty()) { - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - log.trace("PtP CHUNK DAO! fetchReduced; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! 
fetchReduced " - + "invoked with null or empty list of IDs!"); - return Lists.newArrayList(); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the chunk to - * SRM_FAILURE and record it in the DB, in the status_Put table. This - * operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messages - * were recorded. Yet it soon became clear that the source of malformed data - * were actually the clients themselves and/or FE recording in the DB. In - * these circumstances the client would find its request as being in the - * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the - * encountered problems. - */ - public synchronized void signalMalformedPtPChunk(PtPChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: signalMalformedPtPChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Put sp SET sp.statusCode=" - + statusCodeConverter.toDB(SRM_FAILURE) - + ", sp.explanation=? " + "WHERE sp.request_PutID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* NB: Prepared statement spares DB-specific String notation! */ - signal.setString(1, "This chunk of the request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to signal in DB that a chunk of " - + "the request was malformed! 
Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Put requests on the given SURL, that are - * in SRM_SPACE_AVAILABLE state. This method is intended to be used by - * PtPChunkCatalog in the isSRM_SPACE_AVAILABLE method invocation. In case of - * any error, 0 is returned. - */ - public synchronized int numberInSRM_SPACE_AVAILABLE(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: numberInSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return 0; - } - - String str = "SELECT COUNT(rp.ID) FROM status_Put sp JOIN request_Put rp " - + "ON (sp.request_PutID=rp.ID) " - + "WHERE rp.targetSURL_uniqueID=? AND sp.statusCode=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - stmt.setInt(1, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - numberInSRM_SPACE_AVAILABLE method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - int numberSpaceAvailable = 0; - if (rs.next()) { - numberSpaceAvailable = rs.getInt(1); - } - return numberSpaceAvailable; - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to determine " - + "numberInSRM_SPACE_AVAILABLE! Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(stmt); - } - } - - /** - * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
- * - * @return a Map containing the ID of the request as key and the relative - * SURL as value - */ - public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { - - Map ids = Maps.newHashMap(); - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: getExpiredSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return ids; - } - - String idsstr = "SELECT rp.ID, rp.targetSURL FROM " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(idsstr); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to select expired " - + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that retrieves all ptp requests in SRM_REQUEST_INPROGRESS state which can be - * considered as expired. 
- * - * @return a Map containing the ID of the request as key and the involved array of SURLs as - * value - */ - public synchronized List getExpiredSRM_REQUEST_INPROGRESS(long expirationTime) { - - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error( - "PtP CHUNK DAO: getExpiredSRM_REQUEST_INPROGRESS - unable to get a valid connection!"); - return ids; - } - - String query = "SELECT rq.ID FROM request_queue rq, request_Put rp, status_Put sp " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND)"; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stmt.setLong(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - stmt.setLong(2, expirationTime); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_REQUEST_INPROGRESS: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.add(rs.getLong("rq.ID")); - } - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to select expired " - + "SRM_REQUEST_INPROGRESS chunks of PtP requests. {}", - e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_SUCCESS. - * An array of long representing the primary key of each chunk is required. - * This is needed when the client invokes srmPutDone() In case of any error - * nothing happens and no exception is thrown, but proper messages get - * logged. 
- */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS(List ids) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS - unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? " + "WHERE sp.statusCode=? AND rp.ID IN (" - + StringUtils.join(ids.toArray(), ',') + ")"; - - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, - statusCodeConverter.toDB(SRM_SUCCESS)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS: {}", stmt); - - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count == 0) { - log.trace("PtPChunkDAO! No chunk of PtP request was " - + "transited from SRM_SPACE_AVAILABLE to SRM_SUCCESS."); - } else { - log.info("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_SUCCESS.", count); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_SUCCESS! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into - * SRM_FILE_LIFETIME_EXPIRED. An array of Long representing the primary key - * of each chunk is required. This is needed when the client forgets to invoke - * srmPutDone(). In case of any error or exception, the returned int value - * will be zero or less than the input List size. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records into the db - */ - public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=? " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - - if (!ids.isEmpty()) { - querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - } - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, "Expired pinLifetime"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace( - "PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); - return count; - } - - /** - * Method that updates enqueued requests selected by id into SRM_FAILURE. - * An array of Long representing the id of each request is required. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records. Zero or less than the input list size in case of errors. - */ - public synchronized int transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (ids.isEmpty()) { - return 0; - } - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " - + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setString(3, "Request expired"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - int i = 5; - for (Long id: ids) { - stmt.setLong(i, id); - printWarnings(stmt.getWarnings()); - i++; - } - - log.trace( - "PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! 
{} chunks of PtP requests were transited " - + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); - return count; - - } - - /** - * Method that transit chunks in SRM_SPACE_AVAILABLE to SRM_ABORTED, for the - * given SURL: the overall request status of the requests containing that - * chunk, is not changed! The TURL is set to null. Beware, that the chunks may - * be part of requests that have finished, or that still have not finished - * because other chunks are still being processed. - */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_ABORTED( - int surlUniqueID, String surl, String explanation) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=?, sp.transferURL=NULL " - + "WHERE sp.statusCode=? AND (rp.targetSURL_uniqueID=? OR rp.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED: {}", stmt); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count > 0) { - log.info("PtP CHUNK DAO! {} chunks were transited from " - + "SRM_SPACE_AVAILABLE to SRM_ABORTED.", count); - } else { - log.trace("PtP CHUNK DAO! 
No chunks " - + "were transited from SRM_SPACE_AVAILABLE to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTP CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTP CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Exception in takeDownConnection method - " - + "could not close connection! 
{}", e.getMessage(), e); - } - } - } - - public synchronized int updateStatus(int[] surlsUniqueIDs, String[] surls, - TStatusCode statusCode, String explanation) { - - if (explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: explanation=" + explanation); - } - return doUpdateStatus(null, surlsUniqueIDs, surls, statusCode, explanation, false, - true); - } - - public synchronized int updateStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, - explanation, true, true); - } - - private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, - String[] surls, TStatusCode statusCode, String explanation, - boolean withRequestToken, boolean withExplaination) - throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplaination && explanation == null)) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withExplaination=" - + withExplaination + " explaination=" + explanation); - } - if (!checkConnection()) { - log - .error("PTP CHUNK DAO: updateStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " - + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; - if (withExplaination) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE "; - if (withRequestToken) { - str += buildTokenWhereClause(requestToken) + " AND "; - } - str += " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", - statusCode); - } else { - log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "to {}.", count, statusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized int updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || 
surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - return doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation, boolean withRequestToken, - boolean withSurls, boolean withExplanation) { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlsUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - 
.error("PTP CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sp.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); - } - - int count = 0; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from {} to {}! 
Error: {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - - private List chunkTOfromResultSet(ResultSet rs) - throws SQLException{ - - List results = Lists.newArrayList(); - while (rs.next()) { - - PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); - - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - results.add(chunkDataTO); - } - - return results; - } - - - - public synchronized List findActivePtPsOnSURLs(List surls){ - - if (surls == null || surls.isEmpty()){ - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null list of SURLs!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL in "+ makeSurlString((String[])surls.toArray()) +" )" - + "AND sp.statusCode = 24"; - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURLs(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - } - - - public 
synchronized List findActivePtPsOnSURL(String surl) { - return findActivePtPsOnSURL(surl, null); - } - - public synchronized List findActivePtPsOnSURL(String surl, - String currentRequestToken) { - - if (surl == null || surl.isEmpty()) { - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null SURL!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and sp.statusCode=24 )"; - - if (currentRequestToken != null){ - query += "AND rq.r_token != ?"; - } - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stat.setString(1, surl); - - if (currentRequestToken != null){ - stat.setString(2, currentRequestToken); - } - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURL(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PtP 
CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rp.targetSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized List findProtocols(long requestQueueId) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp " + "WHERE tp.request_queueID=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setLong(1, requestQueueId); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - findProtocols method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - - return protocols; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sp.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - -} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java deleted file mode 100644 index 754718d51..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java +++ /dev/null @@ -1,329 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare 
(INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the PtPChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP fileStorageType VOLATILE overwriteMode NEVER status - * SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDataTO { - - private static final String FQAN_SEPARATOR = "#"; - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of status_Put record in DB - private String toSURL = " "; - private long expectedFileSize = 0; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int pinLifetime = -1; - private int fileLifetime = -1; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private List protocolList = null; // initialised in constructor - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private String turl = " "; - private Timestamp timeStamp = null; - - private String clientDN = null; - private String vomsAttributes = null; - - - public PtPChunkDataTO() { - - this.fileStorageType = FileStorageTypeConverter.getInstance().toDB( - 
TFileStorageType.getTFileStorageType(Configuration.getInstance() - .getDefaultFileStorageType())); - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - public int pinLifetime() { - - return pinLifetime; - } - - public void setPinLifetime(int n) { - - pinLifetime = n; - } - - public int fileLifetime() { - - return fileLifetime; - } - - public void setFileLifetime(int n) { - - fileLifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method that sets the FileStorageType: if it is null nothing gets set. The - * deafult value is Permanent. 
- */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public long expectedFileSize() { - - return expectedFileSize; - } - - public void setExpectedFileSize(long l) { - - expectedFileSize = l; - } - - public List protocolList() { - - return protocolList; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) - protocolList = l; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the OverwriteMode: if it is null nothing gets set. The - * deafult value is Never. - */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String transferURL() { - - return turl; - } - - public void setTransferURL(String s) { - - turl = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - vomsAttributes = s; - } - - public void setVomsAttributes(String[] fqaNsAsString) { - - vomsAttributes = ""; - for (int i = 0; i < fqaNsAsString.length; i++) { - vomsAttributes += fqaNsAsString[i]; - if (i < fqaNsAsString.length - 1) { - vomsAttributes += FQAN_SEPARATOR; - } - } - - } - - public String[] vomsAttributesArray() { - - return vomsAttributes.split(FQAN_SEPARATOR); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - 
sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(pinLifetime); - sb.append(" "); - sb.append(fileLifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(expectedFileSize); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(turl); - return sb.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java deleted file mode 100644 index 4d1d3c7a9..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedBoLChunkData proper, that is - * String and primitive types. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date November, 2006 - */ -public class ReducedBoLChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public String errString() { - - return errString; - } - - public String fromSURL() { - - return fromSURL; - } - - public long primaryKey() { - - return primaryKey; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setStatus(int n) { - - status = n; - } - - public int status() { - - return status; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java deleted file mode 100644 index b2c25c40d..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -/** - * This class represents a ReducedCopyChunkData, that is part of a multifile - * Copy srm request. It contains data about: the requestToken, the fromSURL, the - * toSURL, return status of the file together with its error string. - * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkData { - - /* long representing the primary key for the persistence layer! */ - private long primaryKey = -1; - /* SURL from which the srmCopy will get the file */ - private TSURL fromSURL; - /* SURL to which the srmCopy will put the file */ - private TSURL toSURL; - /* Return status for this chunk of request */ - private TReturnStatus status; - - public ReducedCopyChunkData(TSURL fromSURL, TSURL toSURL, TReturnStatus status) - throws InvalidReducedCopyChunkDataAttributesException { - - if (fromSURL == null || toSURL == null || status == null) { - throw new InvalidReducedCopyChunkDataAttributesException(fromSURL, - toSURL, status); - } - - this.fromSURL = fromSURL; - this.toSURL = toSURL; - this.status = status; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. 
- */ - public TSURL toSURL() { - - return toSURL; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("CopyChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("RequestToken="); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("toSURL="); - sb.append(toSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + toSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedCopyChunkData)) { - return false; - } - ReducedCopyChunkData cd = (ReducedCopyChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && toSURL.equals(cd.toSURL) && status.equals(cd.status); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java deleted file mode 100644 index 8fe6bc5bb..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedCopyChunkData proper, that is - * String and primitive types. - * - * All other fields are 0 if int, or a white space if String. 
- * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String toSURL = " "; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** 
- * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java index fd4e0e0da..a6ca31009 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java @@ -4,12 +4,32 @@ */ package it.grid.storm.catalogs; -import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; import it.grid.storm.griduser.FQAN; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import 
it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; +import it.grid.storm.persistence.exceptions.MalformedGridUserException; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; +import it.grid.storm.persistence.model.RequestSummaryData; +import it.grid.storm.persistence.model.RequestSummaryDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -18,411 +38,315 @@ import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - /** - * Class that represents the RequestSummaryCatalog of StoRM. The rows in the - * catalog are called RequestSummaryData. Methods are provided to: look up newly - * added requests as identified by their SRM_REQUEST_QUEUED status, to update - * the global status of the request, and to fail a request with SRM_FAILURE. + * Class that represents the RequestSummaryCatalog of StoRM. The rows in the catalog are called + * RequestSummaryData. Methods are provided to: look up newly added requests as identified by their + * SRM_REQUEST_QUEUED status, to update the global status of the request, and to fail a request with + * SRM_FAILURE. * - * @author EGRID - ICTP Trieste - * @version 2.0 - * @date April 26th, 2005 */ public class RequestSummaryCatalog { - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryCatalog.class); - /** Only instance of RequestSummaryCatalog for StoRM! 
*/ - private static RequestSummaryCatalog cat = new RequestSummaryCatalog(); - /** WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - /** timer thread that will run a task to clean */ - private Timer clock = null; - /** configuration instance **/ - private final Configuration config = Configuration.getInstance(); - - private RequestSummaryCatalog() { - - clock = new Timer(); - - clock.schedule( - new RequestsGarbageCollector(clock, - config.getRequestPurgerPeriod() * 1000), - config.getRequestPurgerDelay() * 1000); - } - - /** - * Method that returns the only instance of RequestSummaryCatalog present in - * StoRM. - */ - public static RequestSummaryCatalog getInstance() { - - return RequestSummaryCatalog.cat; - } - - /** - * Method in charge of retrieving RequestSummaryData associated to new - * requests, that is those found in SRM_REQUETS_QUEUED global status; such - * requests then transit into SRM_SUCCESS. The actual number of fetched - * requests depends on the configured ceiling. - * - * If no new request is found, an empty Collection is returned. if a request - * is malformed, then that request is failed and an attempt is made to signal - * such occurrence in the DB. Only correctly formed requests are returned. - */ - synchronized public Collection fetchNewRequests( - int capacity) { - - List list = Lists.newArrayList(); - - Collection c = dao.findNew(capacity); - if (c == null || c.isEmpty()) { - return list; - } - int fetched = c.size(); - log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched); - for (RequestSummaryDataTO auxTO : c) { - RequestSummaryData aux = null; - try { - aux = makeOne(auxTO); - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne " - + "operation. 
IllegalArgumentException: {}", e.getMessage(), e); - continue; - } - if (aux != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " - + "for processing", aux.requestToken(), aux.gridUser().getDn()); - list.add(aux); - } - } - int ret = list.size(); - if (ret < fetched) { - log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, " - + "since the dropped ones were malformed!", ret); - } else { - log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " - + "requests.", ret); - } - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list); - } - return list; - } - - /** - * Private method used to create a RequestSummaryData object, from a - * RequestSummaryDataTO. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the request is - * malformed. - */ - private RequestSummaryData makeOne(RequestSummaryDataTO to) - throws IllegalArgumentException { - - TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM( - to.requestType()); - if (auxrtype == TRequestType.EMPTY) { - StringBuilder sb = new StringBuilder(); - sb.append("TRequestType could not be created from its String representation "); - sb.append(to.requestType()); - sb.append("\n"); - log.warn(sb.toString()); - throw new IllegalArgumentException( - "Invalid TRequestType in the provided RequestSummaryDataTO"); - } - TRequestToken auxrtoken; - try { - auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); - } catch (InvalidTRequestTokenAttributesException e) { - log.warn("Unable to create TRequestToken from RequestSummaryDataTO. 
" - + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); - throw new IllegalArgumentException( - "Unable to create TRequestToken from RequestSummaryDataTO."); - } - GridUserInterface auxgu; - - try { - auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); - } catch (MalformedGridUserException e) { - StringBuilder sb = new StringBuilder(); - sb.append("VomsGridUser could not be created from DN String "); - sb.append(to.clientDN()); - sb.append(" voms attributes String "); - sb.append(to.vomsAttributes()); - sb.append(" and from request token String "); - sb.append(to.requestToken()); - log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); - throw new IllegalArgumentException( - "Unable to load Voms Grid User from RequestSummaryDataTO. " - + "MalformedGridUserException: " + e.getMessage()); - } - RequestSummaryData data = null; - try { - data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); - data.setPrimaryKey(to.primaryKey()); - } catch (InvalidRequestSummaryDataAttributesException e) { - dao.failRequest(to.primaryKey(), "The request data is malformed!"); - log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" - + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); - throw new IllegalArgumentException("Unable to reate RequestSummaryData"); - } - TReturnStatus status = null; - if (to.getStatus() != null) { - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); - if (code == TStatusCode.EMPTY) { - log.warn("RequestSummaryDataTO retrieved StatusCode was not " - + "recognised: {}", to.getStatus()); - } else { - status = new TReturnStatus(code, to.getErrstring()); - } - } - data.setUserToken(to.getUserToken()); - data.setRetrytime(to.getRetrytime()); - if (to.getPinLifetime() != null) { - data.setPinLifetime(TLifeTimeInSeconds.make(PinLifetimeConverter - .getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); - } - data.setSpaceToken(to.getSpaceToken()); - data.setStatus(status); - data.setErrstring(to.getErrstring()); - data.setRemainingTotalTime(to.getRemainingTotalTime()); - data.setNbreqfiles(to.getNbreqfiles()); - data.setNumOfCompleted(to.getNumOfCompleted()); - if (to.getFileLifetime() != null) { - data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), - TimeUnit.SECONDS)); - } - - data.setDeferredStartTime(to.getDeferredStartTime()); - data.setNumOfWaiting(to.getNumOfWaiting()); - data.setNumOfFailed(to.getNumOfFailed()); - data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); - return data; - } - - /** - * Private method that holds the logic for creating a VomsGridUser from - * persistence and to load any available Proxy. For the moment the VOMS - * attributes present in persistence are NOT loaded! 
- */ - private GridUserInterface loadVomsGridUser(String dn, String fqansString) throws MalformedGridUserException { - - log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); - - if (dn == null) { - throw new MalformedGridUserException("Invalid null DN"); - } - if (fqansString == null || fqansString.isEmpty()) { - return GridUserManager.makeGridUser(dn); - } - - FQAN[] fqans = new FQAN[fqansString.split("#").length]; - int i = 0; - for (String fqan: fqansString.split("#")) { - fqans[i++] = new FQAN(fqan); - } - try { - return GridUserManager.makeVOMSGridUser(dn, fqans); - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - throw new MalformedGridUserException(e.getMessage()); - } - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. In case of any exception - * nothing happens. - */ - synchronized public void updateGlobalStatus(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatus(rt.toString(), StatusCodeConverter.getInstance() - .toDB(status.getStatusCode()), status.getExplanation()); - } - - public void updateFromPreviousGlobalStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, - expectedStatusCode, newStatusCode, explanation); - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. The pin lifetime and the file - * lifetime are updated in order to start the countdown from the moment the - * status is updated. In case of any exception nothing happens. 
- */ - synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatusPinFileLifetime(rt.toString(), StatusCodeConverter - .getInstance().toDB(status.getStatusCode()), status.getExplanation()); - } - - /** - * Method used to change the global status of the supplied request to - * SRM_FAILURE, as well as that of each single chunk in the request. If the - * request type is not supported by the logic, only the global status is - * updated and an error log gets written warning of the unsupported business - * logic. - * - * If the supplied RequestSummaryData is null, nothing gets done; if any DB - * error occurs, no exception gets thrown but proper messagges get logged. - */ - synchronized public void failRequest(RequestSummaryData rsd, - String explanation) { - - if (rsd != null) { - TRequestType rtype = rsd.requestType(); - if (rtype == TRequestType.PREPARE_TO_GET) { - dao.failPtGRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.PREPARE_TO_PUT) { - dao.failPtPRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.COPY) { - dao.failCopyRequest(rsd.primaryKey(), explanation); - } else { - dao.failRequest(rsd.primaryKey(), explanation); - } - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; if the status of the request associated to the supplied request - * token tok is different from SRM_REQUEST_QUEUED, then nothing takes place; - * likewise if the supplied token does not correspond to any request, or if it - * is null. 
- */ - synchronized public void abortRequest(TRequestToken rt) { - - if (rt != null) { - dao.abortRequest(rt.toString()); - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; abort is only applied to those SURLs of the request specified - * in the Collection; if the status of the request associated to the supplied - * request token is different from SRM_REQUEST_QUEUED, then nothing takes - * place; likewise if the supplied token does not correspond to any request, - * if it is null, if the Collection is null, or the Collection does not - * contain TSURLs. - */ - synchronized public void abortChunksOfRequest(TRequestToken rt, - Collection c) { - - if ((rt != null) && (c != null) && (!c.isEmpty())) { - try { - ArrayList aux = new ArrayList(); - for (TSURL tsurl : c) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfRequest: the supplied Collection did not contain " - + "TSURLs! Error: {}", e.getMessage(), e); - } - } - } - - /** - * Method used to abort a request that HAS been fetched for processing; abort - * is only applied to those SURLs of the request specified in the Collection; - * if the status of the request associated to the supplied request token is - * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise - * if the supplied token does not correspond to any request, if it is null, if - * the Collection is null, or the Collection does not contain TSURLs. 
- */ - synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, - Collection tsurls) { - - if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { - try { - List aux = new ArrayList(); - for (TSURL tsurl : tsurls) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfInProgressRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfInProgressRequest: the supplied Collection did not " - + "contain TSURLs! Error: {}", e.getMessage()); - } - } - } - - synchronized public RequestSummaryData find(TRequestToken requestToken) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.toString().trim().isEmpty()) { - throw new IllegalArgumentException( - "Unable to perform find, illegal arguments: requestToken=" - + requestToken); - } - RequestSummaryDataTO to = dao.find(requestToken.toString()); - if (to != null) { - try { - RequestSummaryData data = makeOne(to); - if (data != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", - data.requestToken(), data.gridUser().getDn()); - return data; - } - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); - } - return null; - } - - /** - * Method that returns the TRequestType associated to the request with the - * supplied TRequestToken. If no request exists with that token, or the type - * cannot be established from the DB, or the supplied token is null, then an - * EMPTY TRequestType is returned. 
- */ - synchronized public TRequestType typeOf(TRequestToken rt) { - - TRequestType result = TRequestType.EMPTY; - String type = null; - if (rt != null) { - type = dao.typeOf(rt.toString()); - if (type != null && !type.isEmpty()) - result = RequestTypeConverter.getInstance().toSTORM(type); - } - return result; - } - - /** - * Method used to abort a request that HAS been fetched for processing; if the - * status of the request associated to the supplied request token tok is - * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise - * if the supplied token does not correspond to any request, or if it is null. - */ - synchronized public void abortInProgressRequest(TRequestToken rt) { - - if (rt != null) { - dao.abortInProgressRequest(rt.toString()); - } - } + private static final Logger log = LoggerFactory.getLogger(RequestSummaryCatalog.class); + + private static RequestSummaryCatalog instance; + private final RequestSummaryDAO dao; + + public static synchronized RequestSummaryCatalog getInstance() { + if (instance == null) { + instance = new RequestSummaryCatalog(); + } + return instance; + } + + private RequestSummaryCatalog() { + dao = RequestSummaryDAOMySql.getInstance(); + } + + /** + * Method in charge of retrieving RequestSummaryData associated to new requests, that is those + * found in SRM_REQUETS_QUEUED global status; such requests then transit into SRM_SUCCESS. The + * actual number of fetched requests depends on the configured ceiling. + * + * If no new request is found, an empty Collection is returned. if a request is malformed, then + * that request is failed and an attempt is made to signal such occurrence in the DB. Only + * correctly formed requests are returned. 
+ */ + synchronized public Collection fetchNewRequests(int capacity) { + + List list = Lists.newArrayList(); + + Collection c = dao.fetchNewRequests(capacity); + if (c == null || c.isEmpty()) { + return list; + } + int fetched = c.size(); + log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched); + for (RequestSummaryDataTO auxTO : c) { + RequestSummaryData aux = null; + try { + aux = makeOne(auxTO); + } catch (IllegalArgumentException e) { + log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne " + + "operation. IllegalArgumentException: {}", e.getMessage(), e); + continue; + } + if (aux != null) { + log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " + "for processing", + aux.requestToken(), aux.gridUser().getDn()); + list.add(aux); + } + } + int ret = list.size(); + if (ret < fetched) { + log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, " + + "since the dropped ones were malformed!", ret); + } else { + log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " + "requests.", ret); + } + if (!list.isEmpty()) { + log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list); + } + return list; + } + + /** + * Private method used to create a RequestSummaryData object, from a RequestSummaryDataTO. If a + * chunk cannot be created, an error message gets logged and an attempt is made to signal in the + * DB that the request is malformed. 
+ */ + private RequestSummaryData makeOne(RequestSummaryDataTO to) throws IllegalArgumentException { + + TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM(to.requestType()); + if (auxrtype == TRequestType.EMPTY) { + StringBuilder sb = new StringBuilder(); + sb.append("TRequestType could not be created from its String representation "); + sb.append(to.requestType()); + sb.append("\n"); + log.warn(sb.toString()); + throw new IllegalArgumentException( + "Invalid TRequestType in the provided RequestSummaryDataTO"); + } + TRequestToken auxrtoken; + try { + auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); + } catch (InvalidTRequestTokenAttributesException e) { + log.warn("Unable to create TRequestToken from RequestSummaryDataTO. " + + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); + throw new IllegalArgumentException( + "Unable to create TRequestToken from RequestSummaryDataTO."); + } + GridUserInterface auxgu; + + try { + auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); + } catch (MalformedGridUserException e) { + StringBuilder sb = new StringBuilder(); + sb.append("VomsGridUser could not be created from DN String "); + sb.append(to.clientDN()); + sb.append(" voms attributes String "); + sb.append(to.vomsAttributes()); + sb.append(" and from request token String "); + sb.append(to.requestToken()); + log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); + throw new IllegalArgumentException("Unable to load Voms Grid User from RequestSummaryDataTO. " + + "MalformedGridUserException: " + e.getMessage()); + } + RequestSummaryData data = null; + try { + data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); + data.setPrimaryKey(to.primaryKey()); + } catch (InvalidRequestSummaryDataAttributesException e) { + dao.failRequest(to.primaryKey(), "The request data is malformed!"); + log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" + + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); + throw new IllegalArgumentException("Unable to reate RequestSummaryData"); + } + TReturnStatus status = null; + if (to.getStatus() != null) { + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); + if (code == TStatusCode.EMPTY) { + log.warn("RequestSummaryDataTO retrieved StatusCode was not " + "recognised: {}", + to.getStatus()); + } else { + status = new TReturnStatus(code, to.getErrstring()); + } + } + data.setUserToken(to.getUserToken()); + data.setRetrytime(to.getRetrytime()); + if (to.getPinLifetime() != null) { + data.setPinLifetime(TLifeTimeInSeconds + .make(PinLifetimeConverter.getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); + } + data.setSpaceToken(to.getSpaceToken()); + data.setStatus(status); + data.setErrstring(to.getErrstring()); + data.setRemainingTotalTime(to.getRemainingTotalTime()); + data.setNbreqfiles(to.getNbreqfiles()); + data.setNumOfCompleted(to.getNumOfCompleted()); + if (to.getFileLifetime() != null) { + data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), TimeUnit.SECONDS)); + } + + data.setDeferredStartTime(to.getDeferredStartTime()); + data.setNumOfWaiting(to.getNumOfWaiting()); + data.setNumOfFailed(to.getNumOfFailed()); + data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); + return data; + } + + /** + * Private method that holds the logic for creating a VomsGridUser from persistence and to load + * any available proxy. For the moment the VOMS attributes present in persistence are NOT loaded! 
+ */ + private GridUserInterface loadVomsGridUser(String dn, String fqansString) + throws MalformedGridUserException { + + log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); + + if (dn == null) { + throw new MalformedGridUserException("Invalid null DN"); + } + if (fqansString == null || fqansString.isEmpty()) { + return GridUserManager.makeGridUser(dn); + } + + FQAN[] fqans = new FQAN[fqansString.split("#").length]; + int i = 0; + for (String fqan : fqansString.split("#")) { + fqans[i++] = new FQAN(fqan); + } + try { + return GridUserManager.makeVOMSGridUser(dn, fqans); + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + throw new MalformedGridUserException(e.getMessage()); + } + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. In case of any exception nothing happens. + */ + synchronized public void updateGlobalStatus(TRequestToken rt, TReturnStatus status) { + + dao.updateGlobalStatus(rt, status.getStatusCode(), status.getExplanation()); + } + + public void updateFromPreviousGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. The pin lifetime and the file lifetime are updated in order to start + * the count-down from the moment the status is updated. In case of any exception nothing happens. 
+ */ + synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, + TReturnStatus status) { + + dao.updateGlobalStatusPinFileLifetime(rt, status.getStatusCode(), status.getExplanation()); + } + + /** + * Method used to change the global status of the supplied request to SRM_FAILURE, as well as that + * of each single chunk in the request. If the request type is not supported by the logic, only + * the global status is updated and an error log gets written warning of the unsupported business + * logic. + */ + public synchronized void failRequest(RequestSummaryData rsd, String explanation) { + + Preconditions.checkNotNull(rsd); + TRequestType rtype = rsd.requestType(); + if (PREPARE_TO_GET.equals(rtype)) { + dao.failPtGRequest(rsd.primaryKey(), explanation); + } else if (PREPARE_TO_PUT.equals(rtype)) { + dao.failPtPRequest(rsd.primaryKey(), explanation); + } else { + dao.failRequest(rsd.primaryKey(), explanation); + } + } + + /** + * Method used to abort a request that HAS been fetched for processing; abort is only applied to + * those SURLs of the request specified in the Collection; if the status of the request associated + * to the supplied request token is different from SRM_REQUEST_INPROGRESS, then nothing takes + * place; likewise if the supplied token does not correspond to any request, if it is null, if the + * Collection is null, or the Collection does not contain TSURLs. + */ + synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, + Collection tsurls) { + + if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { + try { + List aux = new ArrayList(); + for (TSURL tsurl : tsurls) { + aux.add(tsurl.toString()); + } + dao.abortChunksOfInProgressRequest(rt, aux); + } catch (ClassCastException e) { + log.error("REQUEST SUMMARY CATALOG! Unexpected error in " + + "abortChunksOfInProgressRequest: the supplied Collection did not " + + "contain TSURLs! 
Error: {}", e.getMessage()); + } + } + } + + synchronized public RequestSummaryData find(TRequestToken requestToken) + throws IllegalArgumentException { + + if (requestToken == null || requestToken.toString().trim().isEmpty()) { + throw new IllegalArgumentException( + "Unable to perform find, illegal arguments: requestToken=" + requestToken); + } + RequestSummaryDataTO to = dao.find(requestToken); + if (to != null) { + try { + RequestSummaryData data = makeOne(to); + if (data != null) { + log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", data.requestToken(), + data.gridUser().getDn()); + return data; + } + } catch (IllegalArgumentException e) { + log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); + } + return null; + } + + /** + * Method that returns the TRequestType associated to the request with the supplied TRequestToken. + * If no request exists with that token, or the type cannot be established from the DB, or the + * supplied token is null, then an EMPTY TRequestType is returned. + */ + synchronized public TRequestType typeOf(TRequestToken rt) { + + TRequestType result = TRequestType.EMPTY; + if (rt != null) { + result = dao.getRequestType(rt); + } + return result; + } + + /** + * Method used to abort a request that HAS been fetched for processing; if the status of the + * request associated to the supplied request token tok is different from SRM_REQUEST_INPROGRESS, + * then nothing takes place; likewise if the supplied token does not correspond to any request, or + * if it is null. 
+ */ + synchronized public void abortInProgressRequest(TRequestToken rt) { + + if (rt != null) { + dao.abortInProgressRequest(rt); + } + } } diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java deleted file mode 100644 index ad52e5f7c..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java +++ /dev/null @@ -1,1377 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for RequestSummaryCatalog. This DAO is specifically designed to - * connect to a MySQL DB. 
- * - * @author EGRID ICTP - * @version 3.0 - * @date May 2005 - */ -public class RequestSummaryDAO { - - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getStormDbURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** maximum number of requests that will be retrieved */ - private int limit; - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! 
*/ - private boolean reconnect = false; - - private static final RequestSummaryDAO dao = new RequestSummaryDAO(); - - private RequestSummaryDAO() { - - int aux = Configuration.getInstance().getPickingMaxBatchSize(); - if (aux > 1) { - limit = aux; - } else { - limit = 1; - } - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the RequestSummaryDAO. - */ - public static RequestSummaryDAO getInstance() { - - return dao; - } - - /** - * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved - * requests are limited to the number specified by the Configuration method - * getPicker2MaxBatchSize. All retrieved requests get their global status - * transited to SRM_REQUEST_INPROGRESS. A Collection of RequestSummaryDataTO - * is returned: if none are found, an empty collection is returned. - */ - public Collection findNew(int freeSlot) { - - PreparedStatement stmt = null; - ResultSet rs = null; - List list = Lists.newArrayList(); - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - findNew: unable to get a valid connection!"); - return list; - } - // RequestSummaryDataTO - try { - // start transaction - con.setAutoCommit(false); - - int howMuch = -1; - if (freeSlot > limit) { - howMuch = limit; - } else { - howMuch = freeSlot; - } - - String query = "SELECT ID, config_RequestTypeID, r_token, timeStamp, " - + "client_dn, proxy FROM request_queue WHERE status=? 
LIMIT ?"; - - // get id, request type, request token and client_DN of newly added - // requests, which must be in SRM_REQUEST_QUEUED state - stmt = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - stmt.setInt(2, howMuch); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - List rowids = new ArrayList(); // arraylist with selected ids - RequestSummaryDataTO aux = null; // RequestSummaryDataTO made from - // retrieved row - long auxid; // primary key of retrieved row - while (rs.next()) { - auxid = rs.getLong("ID"); - rowids.add(Long.valueOf(auxid)); - aux = new RequestSummaryDataTO(); - aux.setPrimaryKey(auxid); - aux.setRequestType(rs.getString("config_RequestTypeID")); - aux.setRequestToken(rs.getString("r_token")); - aux.setClientDN(rs.getString("client_dn")); - aux.setTimestamp(rs.getTimestamp("timeStamp")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - aux.setVomsAttributes(new String(bdata)); - } - - list.add(aux); - } - close(rs); - close(stmt); - - // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS - if (!list.isEmpty()) { - logWarnings(con.getWarnings()); - String where = makeWhereString(rowids); - String update = "UPDATE request_queue SET status=" - + StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS) + ", errstring=?" 
- + " WHERE ID IN " + where; - stmt = con.prepareStatement(update); - logWarnings(stmt.getWarnings()); - stmt.setString(1, "Request handled!"); - logWarnings(stmt.getWarnings()); - log.trace("REQUEST SUMMARY DAO - findNew: executing {}", stmt); - stmt.executeUpdate(); - close(stmt); - } - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " - + "Error: {}. Rolling back!", e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - // return collection of requests - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY DAO - findNew: returning {}", list); - } - return list; - } - - /** - * Method used to signal in the DB that a request failed: the status of the - * request identified by the primary key index is transited to SRM_FAILURE, - * with the supplied explanation String. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failRequest: unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE request_queue r " + "SET r.status=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", r.errstring=? " + "WHERE r.ID=?"; - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - signal.setString(1, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(signal.getWarnings()); - signal.setLong(2, index); - logWarnings(signal.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failRequest executing: {}", signal); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " - + "ID {} to SRM_FAILURE! Error: {}", index, e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method used to signal in the DB that a PtGRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failPtGRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtGRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Get s JOIN (request_queue r, request_Get g) ON s.request_GetID=g.ID AND g.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failPtGRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtG request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a PtPRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messagges get logged. - */ - public void failPtPRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtPRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Put s JOIN (request_queue r, request_Put p) ON s.request_PutID=p.ID AND p.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtP request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a CopyRequest failed. The global - * status transits to SRM_FAILURE, as well as that of each chunk associated to - * the request. The supplied explanation string is used both for the global - * status as well as for each individual chunk. 
The supplied index is the - * primary key of the global request. In case of any error, nothing gets done - * and no exception is thrown, but proper error messagges get logged. - */ - public void failCopyRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failCopyRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Copy s JOIN (request_queue r, request_Copy c) ON s.request_CopyID=c.ID AND c.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failCopyRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failCopyRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit Copy request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. It gets updated the supplied status, with the supplied - * explanation String. If the supplied request token does not exist, nothing - * happens. - */ - public void updateGlobalStatus(String rt, int status, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatus: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? 
WHERE r_token=?"); - logWarnings(con.getWarnings()); - update.setInt(1, status); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, rt); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - public void updateGlobalStatusOnMatchingGlobalStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"); - logWarnings(con.getWarnings()); - update.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, requestToken.toString()); - logWarnings(update.getWarnings()); - update.setInt(4, - StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. 
It gets updated the supplied status, with the supplied - * explanation String and pin and file lifetimes are updated in order to start - * the countdown from now. If the supplied request token does not exist, - * nothing happens. - */ - public void updateGlobalStatusPinFileLifetime(String rt, int status, - String explanation) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - updateGlobalStatusPinFileLifetime: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - - String query = "UPDATE request_queue SET status=?, errstring=?, " - + "pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " - + "WHERE r_token=?"; - - try { - update = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - update.setInt(1, status); - logWarnings(update.getWarnings()); - - update.setString(2, explanation); - logWarnings(update.getWarnings()); - - update.setString(3, rt); - logWarnings(update.getWarnings()); - - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - - update.executeUpdate(); - logWarnings(update.getWarnings()); - - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_QUEUED state, then - * nothing happens. - */ - public void abortRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - statusTable = "status_Get"; - requestTable = "request_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t) ON (s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID) " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " - + "could not update file statuses because the request type could " - + "not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_INPROGRESS state, - * then nothing happens. - */ - public void abortInProgressRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: unable to get " - + "a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - // token found... - // get ID - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update global request status - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." 
- + joinColumn + "=t.ID AND t.request_queueID=r.ID )" - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortInProgressRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. If the supplied token is null, or - * not found, or not in the SRM_REQUEST_QUEUED state, then nothing happens. - */ - public void abortChunksOfRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. If the supplied token is - * null, or not found, or not in the SRM_REQUEST_INPROGRESS state, then - * nothing happens. - */ - public void abortChunksOfInProgressRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: unable " - + "to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest " - + "- {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfInProgressRequest: could not update file statuses " - + "because the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Private method that returns a String of all SURLS in the collection of - * String. - */ - private String makeInString(Collection c) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public String typeOf(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - String result = ""; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - typeOf: unable to get a valid connection!"); - return result; - } - try { - query = con - .prepareStatement("SELECT config_RequestTypeID from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - result = rs.getString("config_RequestTypeID"); - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - } - return result; - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public RequestSummaryDataTO find(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - RequestSummaryDataTO to = null; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - find: unable to get a valid connection!"); - return null; - } - try { - query = con - .prepareStatement("SELECT * from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - con.setAutoCommit(false); - - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (!rs.first()) { - log.debug("No requests found with token {}", rt); - return null; - } - to = new RequestSummaryDataTO(); - to.setPrimaryKey(rs.getLong("ID")); - to.setRequestType(rs.getString("config_RequestTypeID")); - to.setClientDN(rs.getString("client_dn")); - to.setUserToken(rs.getString("u_token")); - to.setRetrytime(rs.getInt("retrytime")); - to.setPinLifetime(rs.getInt("pinLifetime")); - to.setSpaceToken(rs.getString("s_token")); - to.setStatus(rs.getInt("status")); - to.setErrstring(rs.getString("errstring")); - to.setRequestToken(rs.getString("r_token")); - to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); - to.setFileLifetime(rs.getInt("fileLifetime")); - to.setNbreqfiles(rs.getInt("nbreqfiles")); - to.setNumOfCompleted(rs.getInt("numOfCompleted")); - to.setNumOfWaiting(rs.getInt("numOfWaiting")); - to.setNumOfFailed(rs.getInt("numOfFailed")); - to.setTimestamp(rs.getTimestamp("timeStamp")); - - - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - to.setVomsAttributes(new String(bdata)); - } - to.setDeferredStartTime(rs.getInt("deferredStartTime")); - to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); - - if (rs.next()) { - log.warn("More than a row matches token {}", rt); - } - close(rs); - close(query); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); - } finally { - close(rs); - 
close(query); - } - return to; - } - - /** - * Method that purges expired requests: it only removes up to a fixed value of - * expired requests at a time. The value is configured and obtained from the - * configuration property getPurgeBatchSize. A List of Strings with the - * request tokens removed is returned. In order to completely remove all - * expired requests, simply keep invoking this method until an empty List is - * returned. This batch processing is needed because there could be millions - * of expired requests which are likely to result in out-of-memory problems. - * Notice that in case of errors only error messages get logged. An empty List - * is also returned. - */ - public List purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - - PreparedStatement ps = null; - ResultSet rs = null; - List requestTokens = Lists.newArrayList(); - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests: unable to get a " - + "valid connection!"); - return requestTokens; - } - - try { - // start transaction - con.setAutoCommit(false); - String stmt = "SELECT ID, r_token FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; - ps = con.prepareStatement(stmt); - ps.setLong(1, expiredRequestTime); - ps.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - ps.setInt(4, purgeSize); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", ps); - - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - while (rs.next()) { - requestTokens.add(rs.getString("r_token")); - ids.add(new Long(rs.getLong("ID"))); - } - - close(rs); - close(ps); - - if (!ids.isEmpty()) { - // REMOVE BATCH OF EXPIRED REQUESTS! 
- stmt = "DELETE FROM request_queue WHERE ID in " + makeWhereString(ids); - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - - int deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted " - + "expired requests."); - } - - close(ps); - - stmt = "DELETE request_DirOption FROM request_DirOption " - + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" - + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " - + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" - + " WHERE request_Copy.request_DirOptionID IS NULL AND" - + " request_Get.request_DirOptionID IS NULL AND" - + " request_BoL.request_DirOptionID IS NULL;"; - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "DirOption related to expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " - + "DirOption related to expired requests."); - } - close(ps); - - } - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - return requestTokens; - } - - /** - * Retrieve the total number of expired requests. 
- * - * @return - */ - public int getNumberExpired() { - - int rowCount = 0; - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - getNumberExpired: unable to get a " - + "valid connection!"); - return 0; - } - - PreparedStatement ps = null; - ResultSet rs = null; - - try { - // start transaction - con.setAutoCommit(false); - - String stmt = "SELECT count(*) FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? "; - ps = con.prepareStatement(stmt); - ps.setLong(1, Configuration.getInstance().getExpiredRequestTime()); - ps.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance() - .toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - // Get the number of rows from the result set - rs.next(); - rowCount = rs.getInt(1); - log.debug("Nr of expired requests is: {}", rowCount); - - close(rs); - close(ps); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - - return rowCount; - - } - - /** - * Private method that returns a String of all IDs retrieved by the last - * SELECT. - */ - private String makeWhereString(List rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in takeDownConnection " - + "method: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close Statement {} - " - + "Error: {}", stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("PICKER2: roll back successful!"); - } catch (SQLException e2) { - log.error("PICKER2: roll back failed! 
{}", e2.getMessage(), e2); - } - } - } - - /** - * Private auxiliary method used to log SQLWarnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - } - } - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java index d1664cda9..92563c8e6 100644 --- a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java @@ -2,684 +2,458 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -/* - * ReservedSpaceCatalog - */ - package it.grid.storm.catalogs; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.DAOFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.model.TransferObjectDecodingException; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.ArrayOfTSpaceToken; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TSpaceToken; - import java.io.File; import java.util.Calendar; import java.util.Collection; import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; +import java.util.Map; +import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; -/** - * - */ +import it.grid.storm.griduser.GridUserInterface; 
+import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.model.TransferObjectDecodingException; +import it.grid.storm.space.StorageSpaceData; +import it.grid.storm.srm.types.ArrayOfTSpaceToken; +import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; +import it.grid.storm.srm.types.TSpaceToken; public class ReservedSpaceCatalog { - private static final Logger log = LoggerFactory - .getLogger(ReservedSpaceCatalog.class); - private static HashSet voSA_spaceTokenSet = new HashSet(); - private static HashMap voSA_UpdateTime = new HashMap(); - - private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; - - private final DAOFactory daoFactory; - private StorageSpaceDAO ssDAO; - - /********************************************* - * STATIC METHODS - *********************************************/ - public static void addSpaceToken(TSpaceToken token) { - - voSA_spaceTokenSet.add(token); - voSA_UpdateTime.put(token, null); - } - - public static HashSet getTokenSet() { - - return voSA_spaceTokenSet; - } - - public static void clearTokenSet() { - - voSA_spaceTokenSet.clear(); - voSA_UpdateTime.clear(); - } - - public static void setUpdateTime(TSpaceToken token, Date updateTime) { - - if (voSA_UpdateTime.containsKey(token)) { - voSA_UpdateTime.put(token, updateTime); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - } - - public static Date getUpdateTime(TSpaceToken token) { - - Date result = null; - if (voSA_UpdateTime.containsKey(token)) { - result = voSA_UpdateTime.get(token); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - return result; - } - - /********************************************* - * CLASS METHODS - *********************************************/ - /** 
- * Default constructor - */ - public ReservedSpaceCatalog() { - - log.debug("Building Reserve Space Catalog..."); - // Binding to the persistence component - daoFactory = PersistenceDirector.getDAOFactory(); - } - - /** - * Basic method used to retrieve all the information about a StorageSpace - - * StorageSpace is selected by SpaceToken - * - * @param spaceToken - * TSpaceToken - * @return StorageSpaceData, null if no-one SS exists with the specified - * spaceToken - * @throws DataAccessException - */ - public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) - throws TransferObjectDecodingException, DataAccessException { - - StorageSpaceData result = null; - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); - log.debug("Storage Space retrieved by Token. "); - if (ssTO != null) { - try { - result = new StorageSpaceData(ssTO); - } catch (IllegalArgumentException e) { - log.error("Error building StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getLocalizedMessage(), e); - throw new TransferObjectDecodingException( - "Unable to build StorageSpaceData from StorageSpaceTO"); - } - } else { - log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " - + "from the DB"); - } - return result; - } - - /** - * Create a new StorageSpace entry into the DB. 
It is used for - STATIC Space - * Creation - DYNAMIC Space Reservation - * - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - log.debug("ADD StorageSpace Start..."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - ssDAO.addStorageSpace(ssTO); - log.debug("StorageSpaceTO inserted in Persistence"); - } - - /** - * @param ssd - * @param updateTime - * - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - ssTO.setCreated(null); // we don't want to update the creation timestamp - ssTO.setUpdateTime(new Date()); - - ssDAO.updateStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } - - /** - * @param ssd - */ - public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) - throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO.updateStorageSpaceFreeSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - - } - - /** - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void updateAllStorageSpace(StorageSpaceData ssd) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - updateAllStorageSpace(ssd, null); - } - - /** - * Update StorageSpace. 
This method is used to update the StorageSpace into - * the ReserveSpace Catalog. The update operation take place after a - * AbortRequest for a PrepareToPut operation done with the spaceToken.(With or - * without the size specified). - */ - - public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - log.debug("UPDATE StorageSpace Start..."); - // Build StorageSpaceTO from SpaceData - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - if (updateTime == null) { - // The update time of the information is now - ssTO.setUpdateTime(new Date()); - } else { - ssTO.setUpdateTime(updateTime); - } - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Add the row to the persistence.. - try { - ssDAO.updateAllStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } catch (DataAccessException daEx) { - log.error("Error while inserting new row in StorageSpace: {}", - daEx.getMessage(), daEx); - } - } - - /** - * @param desc - * @return - */ - public StorageSpaceData getStorageSpaceByAlias(String desc) { - - StorageSpaceData result = null; // new StorageSpaceData(); - log.debug("Retrieve Storage Space start... "); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); - if (cl != null && !cl.isEmpty()) { - log.debug("Storage Space retrieved by Token. 
"); - // Build the result - try { - result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - - return result; - } - - /** - * Provides a list of storage spaces not initialized by comparing the used - * space stored against the well know not initialized value - * NOT_INITIALIZED_SIZE_VALUE - * - * @return SpaceData - */ - public List getStorageSpaceNotInitialized() { - - log.debug("Retrieve Storage Space not initialized start "); - List result = Lists.newLinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get StorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); - log.debug("Storage Space retrieved by not initialized used space. "); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace", daEx); - } - return result; - } - - /** - * Provides a list of storage spaces not updated since the provided timestamp - * - * @param lastUpdateTimestamp - * @return - */ - - public List getStorageSpaceByLastUpdate( - Date lastUpdateTimestamp) { - - log.debug("Retrieve Storage Space not initialized start "); - LinkedList result = new LinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // GetStorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByPreviousLastUpdate(lastUpdateTimestamp); - log.debug("Storage Space retrieved by Token previous last update. 
"); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - return result; - } - - /** - * - * @param user - * VomsGridUser - * @param spaceAlias - * String - * @return ArrayOfTSpaceToken - */ - public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, - String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - - Collection listOfStorageSpace = ssDAO.getStorageSpaceByOwner(user, - spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("getSpaceTokens : Number of Storage spaces retrieved with " - + "Alias '{}': {}", spaceAlias, nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception 
e) { - log.error("Exception while retrieving Storage Space: {}", e.getMessage(), - e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. - * - * @param spaceAlias - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO - .getStorageSpaceByAliasOnly(spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception e) { - log.error("Error getting data! Error: {}", e.getMessage(), e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. 
- * - * @param VOname - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensBySpaceType(String stype) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO.getStorageSpaceBySpaceType(stype); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); - } catch (Exception e) { - log.error("Generic Error while retrieving StorageSpace: {}", e.getMessage(), e); - } - return result; - } - - // ************************ CHECH BELOW METHODS *************************** - - /** - * - * @param user - * GridUserInterface - * @param spaceToken - * TSpaceToken - * @return boolean - */ - public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { - - log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - } - boolean 
rowRemoved = true; - // Delete the row from persistence. - try { - ssDAO.removeStorageSpace(user, spaceToken.getValue()); - log.debug("spaceToken removed from DB."); - } catch (DataAccessException daEx) { - log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); - rowRemoved = false; - } - return rowRemoved; - } - - /** - * Method that purges the catalog, removing expired space reservation. The - * spacefile with lifetime expired are removed from the file systems. - * - */ - public void purge() { - - log.debug("Space Garbage Collector start!"); - Calendar rightNow = Calendar.getInstance(); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get the Collection of Space Resrvation Expired - Collection expiredSpaceTO; - try { - expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); - } catch (DataAccessException e) { - // No space expired FOUND - log.debug("Space Garbage Collector: no space expired found."); - return; - } - - // For each entry expired - // 1) Delete the related space file - // 2) Remove the entry from the DB - - StorageSpaceTO spaceTO = null; - log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", - expiredSpaceTO.size()); - - for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { - spaceTO = (StorageSpaceTO) i.next(); - // Deleteing space File - String spaceFileName = spaceTO.getSpaceFile(); - File sfile = new File(spaceFileName); - log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); - - if (sfile.delete()) { - log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); - } else { - log.warn("Space Garbage Collector: problem removing {}", spaceFileName); - } - - // Removing space entry from the DB - try { - 
ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); - } catch (DataAccessException e) { - log.warn("Space Garbage Collector: error removing space entry from catalog."); - } - - } - - } - - public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { - - log.debug("Increase {} the used space of storage spaceToken: {}", - usedSpaceToAdd, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); - } catch (DataAccessException daEx) { - log.error( - "Error during the increase of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} increaseUsedSpace += {}", spaceToken, usedSpaceToAdd); - return n > 0; - } - - public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { - - log.debug("Decrease {} the used space of storage spaceToken: {}", - usedSpaceToRemove, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); - } catch (DataAccessException daEx) { - log.error( - "Error during the decrease of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); - return n > 0; - } + private 
static final Logger log = LoggerFactory.getLogger(ReservedSpaceCatalog.class); + + private static Set voSA_spaceTokenSet = Sets.newHashSet(); + private static Map voSA_UpdateTime = Maps.newHashMap(); + + private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; + + private static ReservedSpaceCatalog instance; + + public static synchronized ReservedSpaceCatalog getInstance() { + if (instance == null) { + instance = new ReservedSpaceCatalog(); + } + return instance; + } + + private StorageSpaceDAO ssDAO; + + private ReservedSpaceCatalog() { + + log.debug("Building Reserve Space Catalog..."); + ssDAO = StorageSpaceDAOMySql.getInstance(); + } + + /********************************************* + * STATIC METHODS + *********************************************/ + public static void addSpaceToken(TSpaceToken token) { + + voSA_spaceTokenSet.add(token); + voSA_UpdateTime.put(token, null); + } + + public static Set getTokenSet() { + + return voSA_spaceTokenSet; + } + + public static void clearTokenSet() { + + voSA_spaceTokenSet.clear(); + voSA_UpdateTime.clear(); + } + + /** + * Basic method used to retrieve all the information about a StorageSpace - StorageSpace is + * selected by SpaceToken + * + * @param spaceToken TSpaceToken + * @return StorageSpaceData, null if no-one SS exists with the specified spaceToken + * @throws DataAccessException + */ + public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) + throws TransferObjectDecodingException, DataAccessException { + + StorageSpaceData result = null; + ssDAO = StorageSpaceDAOMySql.getInstance(); + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); + log.debug("Storage Space retrieved by Token. 
"); + if (ssTO != null) { + try { + result = new StorageSpaceData(ssTO); + } catch (IllegalArgumentException e) { + log.error( + "Error building StorageSpaceData from StorageSpaceTO " + "IllegalArgumentException: {}", + e.getLocalizedMessage(), e); + throw new TransferObjectDecodingException( + "Unable to build StorageSpaceData from StorageSpaceTO"); + } + } else { + log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " + "from the DB"); + } + return result; + } + + /** + * Create a new StorageSpace entry into the DB. It is used for - STATIC Space Creation - DYNAMIC + * Space Reservation + * + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("ADD StorageSpace Start..."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + log.debug("Storage Space DAO retrieved."); + ssDAO.addStorageSpace(ssTO); + log.debug("StorageSpaceTO inserted in Persistence"); + } + + /** + * Update all the fields apart from the alias of a storage space row given the input + * StorageSpaceData + * + * @param ssd + * + * @throws DataAccessException + */ + public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + ssTO.setCreated(null); // we don't want to update the creation timestamp + ssTO.setUpdateTime(new Date()); + + ssDAO.updateStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } + + /** + * @param ssd + */ + public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + 
ssDAO.updateStorageSpaceFreeSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + + } + + /** + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void updateAllStorageSpace(StorageSpaceData ssd) { + + updateAllStorageSpace(ssd, null); + } + + /** + * Update StorageSpace. This method is used to update the StorageSpace into the ReserveSpace + * Catalog. The update operation take place after a AbortRequest for a PrepareToPut operation done + * with the spaceToken.(With or without the size specified). + */ + + public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) { + + log.debug("UPDATE StorageSpace Start..."); + // Build StorageSpaceTO from SpaceData + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + if (updateTime == null) { + // The update time of the information is now + ssTO.setUpdateTime(new Date()); + } else { + ssTO.setUpdateTime(updateTime); + } + + // Add the row to the persistence.. + try { + ssDAO.updateAllStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } catch (DataAccessException daEx) { + log.error("Error while inserting new row in StorageSpace: {}", daEx.getMessage(), daEx); + } + } + + /** + * @param desc + * @return + */ + public StorageSpaceData getStorageSpaceByAlias(String desc) { + + StorageSpaceData result = null; // new StorageSpaceData(); + log.debug("Retrieve Storage Space start... "); + + // Get StorageSpaceTO form persistence + try { + Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); + if (cl != null && !cl.isEmpty()) { + log.debug("Storage Space retrieved by Token. 
"); + // Build the result + try { + result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } + + return result; + } + + /** + * Provides a list of storage spaces not initialized by comparing the used space stored against + * the well know not initialized value NOT_INITIALIZED_SIZE_VALUE + * + * @return SpaceData + */ + public List getStorageSpaceNotInitialized() { + + log.debug("Retrieve Storage Space not initialized start "); + List result = Lists.newLinkedList(); + + // Get StorageSpaceTO form persistence + try { + Collection storagesSpaceTOCollection = + ssDAO.getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); + log.debug("Storage Space retrieved by not initialized used space. "); + for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { + if (storagesSpaceTO != null) { + try { + result.add(new StorageSpaceData(storagesSpaceTO)); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.warn("Received a collection of StorageSpaceTO containing null " + + "elements, skipping them"); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace", daEx); + } + return result; + } + + /** + * + * @param user VomsGridUser + * @param spaceAlias String + * @return ArrayOfTSpaceToken + */ + public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = + ssDAO.getStorageSpaceByOwner(user, spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("getSpaceTokens : Number of Storage spaces retrieved with " + "Alias '{}': {}", + spaceAlias, nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Exception while retrieving Storage Space: {}", e.getMessage(), e); + } + return result; + } + + /** + * This method is used for the VOspaceArea Check. 
+ * + * @param spaceAlias + * @return + */ + + public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = ssDAO.getStorageSpaceByAliasOnly(spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("Number of Storage spaces retrieved: {}", nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Error getting data! Error: {}", e.getMessage(), e); + } + return result; + } + + // ************************ CHECH BELOW METHODS *************************** + + /** + * + * @param user GridUserInterface + * @param spaceToken TSpaceToken + * @return boolean + */ + public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { + + log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); + + boolean rowRemoved = true; + // Delete the row from persistence. + try { + ssDAO.removeStorageSpace(user, spaceToken.getValue()); + log.debug("spaceToken removed from DB."); + } catch (DataAccessException daEx) { + log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); + rowRemoved = false; + } + return rowRemoved; + } + + /** + * Method that purges the catalog, removing expired space reservation. The spacefile with lifetime + * expired are removed from the file systems. 
+ * + */ + public void purge() { + + log.debug("Space Garbage Collector start!"); + Calendar rightNow = Calendar.getInstance(); + + // Get the Collection of Space Reservation Expired + Collection expiredSpaceTO; + try { + expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); + } catch (DataAccessException e) { + // No space expired FOUND + log.debug("Space Garbage Collector: no space expired found."); + return; + } + + // For each entry expired + // 1) Delete the related space file + // 2) Remove the entry from the DB + + StorageSpaceTO spaceTO = null; + log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", expiredSpaceTO.size()); + + for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { + spaceTO = (StorageSpaceTO) i.next(); + // Deleting space File + String spaceFileName = spaceTO.getSpaceFile(); + File sfile = new File(spaceFileName); + log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); + + if (sfile.delete()) { + log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); + } else { + log.warn("Space Garbage Collector: problem removing {}", spaceFileName); + } + + // Removing space entry from the DB + try { + ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); + } catch (DataAccessException e) { + log.warn("Space Garbage Collector: error removing space entry from catalog."); + } + } + } + + public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { + + log.debug("Increase {} the used space of storage spaceToken: {}", usedSpaceToAdd, spaceToken); + + int n = 0; + try { + n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); + } catch (DataAccessException daEx) { + log.error("Error during the increase of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} increaseUsedSpace += {}", 
spaceToken, usedSpaceToAdd); + return n > 0; + } + + public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { + + log.debug("Decrease {} the used space of storage spaceToken: {}", usedSpaceToRemove, + spaceToken); + + int n = 0; + try { + n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); + } catch (DataAccessException daEx) { + log.error("Error during the decrease of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); + return n > 0; + } } diff --git a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java deleted file mode 100644 index 75b940053..000000000 --- a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSizeInBytes; - -/** - * Class that handles DB representation of a TSizeInBytes, in particular it - * takes care of the NULL logic of the DB: 0/null are used to mean an empty - * field, whereas StoRM Object model uses the type TSizeInBytes.makeEmpty(); - * moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ important to - * use this converter! 
- * - * @author EGRID ICTP - * @version 2.0 - * @date July 2005 - */ -public class SizeInBytesIntConverter { - - private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); - - private SizeInBytesIntConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static SizeInBytesIntConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TSizeInBytes into the empty representation - * of DB which is 0. Any other int is left as is. - */ - public long toDB(long s) { - - if (s == TSizeInBytes.makeEmpty().value()) - return 0; - return s; - } - - /** - * Method that returns the int as is, except if it is 0 which DB interprests - * as empty field: in that case it then returns the Empty TSizeInBytes int - * representation. - */ - public long toStoRM(long s) { - - if (s == 0) - return TSizeInBytes.makeEmpty().value(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java deleted file mode 100644 index 4aacf3804..000000000 --- a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSpaceToken; - -/** - * Class that handles DPM DB representation of a SpaceToken, in particular it - * takes care of the NULL/EMPTY logic of DPM. In particular DPM uses the empty - * string "" as meaning the absence of a value for the field, wheras StoRM - * accepts it as a valis String with which to create a TSpaceToken; moreover - * StoRM uses an Empty TSpaceToken type. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date June 2005 - */ -class SpaceTokenStringConverter { - - private static SpaceTokenStringConverter stc = new SpaceTokenStringConverter(); - - private SpaceTokenStringConverter() { - - } - - /** - * Method that returns the only instance od SpaceTokenConverter - */ - public static SpaceTokenStringConverter getInstance() { - - return stc; - } - - /** - * Method that translates StoRM Empty TSpaceToken String representation into - * DPM empty representation; all other Strings are left as are. - */ - public String toDB(String s) { - - if (s.equals(TSpaceToken.makeEmpty().toString())) - return ""; - return s; - } - - /** - * Method that translates DPM String representing an Empty TSpaceToken into - * StoRM representation; any other String is left as is. - */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TSpaceToken.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java b/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java deleted file mode 100644 index 663d4f476..000000000 --- a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.apache.commons.dbcp2.BasicDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class StoRMDataSource { - - public static final Logger log = LoggerFactory - .getLogger(StoRMDataSource.class); - - public static class Builder{ - - private static final String VALIDATION_QUERY = "select 1 from dual"; - - private String driver; - private String url; - - private String username; - private String password; - - private int maxPooledConnections = 200; - private int initialPoolSize = 10; - - private BasicDataSource ds; - - public Builder() { - } - - public Builder driver(String driver){ - this.driver = driver; - return this; - } - - public Builder url(String url){ - this.url = url; - return this; - } - - public Builder username(String username){ - this.username = username; - return this; - } - - public Builder password(String password){ - this.password = password; - return this; - } - - public Builder maxPooledConnections(int maxPool){ - if (maxPool < 1){ - throw new IllegalArgumentException("maxPooledConnections must be >= 1"); - } - this.maxPooledConnections = maxPool; - return this; - } - - public Builder initialPoolSize(int initialSize){ - if (initialSize <= 0){ - throw new IllegalArgumentException("initialSize must be >= 0"); - } - this.initialPoolSize = initialSize; - return this; - } - - private void sanityChecks(){ - if ((username == null) || (username.isEmpty())) - throw new IllegalArgumentException("null or empty username"); - - if ((driver == null) || (driver.isEmpty())) - throw new IllegalArgumentException("null or empty driver"); - - if ((url == null) || (url.isEmpty())) - throw new IllegalArgumentException("null or empty url"); - - if ((password == null) || (password.isEmpty())) - throw new 
IllegalArgumentException("null or empty password"); - } - - private void logConfiguration(){ - if (log.isDebugEnabled()){ - log.debug("driver: {}", driver); - log.debug("url: {}", url); - log.debug("username: {}", username); - log.debug("password: {}", password); - log.debug("initialPoolSize: {}", initialPoolSize); - log.debug("maxPooledConnections: {}", maxPooledConnections); - } - } - public StoRMDataSource build(){ - sanityChecks(); - logConfiguration(); - ds = new BasicDataSource(); - ds.setDriverClassName(driver); - ds.setUrl(url); - ds.setUsername(username); - ds.setPassword(password); - ds.setInitialSize(initialPoolSize); - ds.setMaxTotal(maxPooledConnections); - ds.setValidationQuery(VALIDATION_QUERY); - ds.setTestWhileIdle(true); - ds.setPoolPreparedStatements(true); - ds.setMaxOpenPreparedStatements(200); - return new StoRMDataSource(this); - } - - } - - private StoRMDataSource(Builder b) { - this.dataSource = b.ds; - } - - private BasicDataSource dataSource; - - - /** - * @return the dataSource - */ - public DataSource getDataSource() { - return dataSource; - } - - - /** - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#close() - */ - public void close() throws SQLException { - dataSource.close(); - } - - - - /** - * @return - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#getConnection() - */ - public Connection getConnection() throws SQLException { - return dataSource.getConnection(); - } - - private static volatile StoRMDataSource instance = null; - - public static synchronized StoRMDataSource getInstance(){ - return instance; - } - - public static synchronized void init(){ - if (instance != null){ - log.warn("Called init on already initialized Storm data source."); - log.warn("The datasource will be closed and re-initialized."); - try { - instance.close(); - } catch (SQLException e) { - log.error("Error closing storm data source: {}", e.getMessage(), e); - } - } - - log.info("Initializing StoRM 
datasource"); - Configuration conf = Configuration.getInstance(); - instance = new StoRMDataSource.Builder() - .driver(conf.getDBDriver()) - .url(conf.getStormDbURL()) - .username(conf.getDBUserName()) - .password(conf.getDBPassword()) - .build(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/TURLConverter.java b/src/main/java/it/grid/storm/catalogs/TURLConverter.java deleted file mode 100644 index 54df7da75..000000000 --- a/src/main/java/it/grid/storm/catalogs/TURLConverter.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TTURL; - -/** - * Class that handles DPM DB representation of a TTURL, in particular it takes - * care of the NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty - * field, whereas StoRM uses the type TTURL.makeEmpty(); in particular StoRM - * converts an empty String or a null to an Empty TTURL! - * - * @author EGRID ICTP - * @version 1.0 - * @date March 2006 - */ -public class TURLConverter { - - private static TURLConverter stc = new TURLConverter(); // only instance - - private TURLConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static TURLConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TTURL into the empty representation of DPM - * which is a null! Any other String is left as is. - */ - public String toDB(String s) { - - if (s.equals(TTURL.makeEmpty().toString())) - return null; - return s; - } - - /** - * Method that translates DPMs "" or null String as the Empty TTURL String - * representation. Any other String is left as is. 
- */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TTURL.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java b/src/main/java/it/grid/storm/catalogs/TapeRecallCatalog.java similarity index 75% rename from src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java rename to src/main/java/it/grid/storm/catalogs/TapeRecallCatalog.java index 04345b8fc..3c9ed8b53 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/TapeRecallCatalog.java @@ -2,56 +2,58 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -/** - * - */ -package it.grid.storm.tape.recalltable; +package it.grid.storm.catalogs; import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.BOL; import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.PTG; -import com.google.common.collect.Lists; - -import it.grid.storm.asynch.Suspendedable; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.RequestData; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
com.google.common.collect.Lists; + +import it.grid.storm.asynch.Suspendedable; +import it.grid.storm.persistence.dao.TapeRecallDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.RequestData; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; + public class TapeRecallCatalog { private static final Logger log = LoggerFactory.getLogger(TapeRecallCatalog.class); - private final TapeRecallDAO tapeRecallDAO; + private static TapeRecallCatalog instance; private static Map> recallBuckets = new ConcurrentHashMap<>(); - /** - * Default constructor - * - * @throws DataAccessException - */ - public TapeRecallCatalog() { + public static synchronized TapeRecallCatalog getInstance() { + if (instance == null) { + instance = new TapeRecallCatalog(); + } + return instance; + } + + private TapeRecallDAO tapeRecallDAO; + + private TapeRecallCatalog() { - tapeRecallDAO = PersistenceDirector.getDAOFactory().getTapeRecallDAO(); + tapeRecallDAO = TapeRecallDAOMySql.getInstance(); } /** @@ -87,25 +89,6 @@ public int getNumberTaskInProgress() throws DataAccessException { return result; } - /** - * Determines how many task rows have an in-progress state given a certain VO - * - * @param voName @return @throws DataAccessException - */ - public int getNumberTaskInProgress(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberInProgress(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks currently in progress. 
DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * Determines how many task rows have a queued state * @@ -124,24 +107,6 @@ public int getNumberTaskQueued() throws DataAccessException { return result; } - /** - * Determines how many task rows have a queued state given a certain VO - * - * @return @throws DataAccessException - */ - public int getNumberTaskQueued(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberQueued(voName); - } catch (DataAccessException e) { - log.error("Unable to retrieve the number of tasks queued. DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * * Determines how many task rows have a queued state and their deferred start time is elapsed @@ -162,30 +127,11 @@ public int getReadyForTakeOver() throws DataAccessException { return result; } - /** - * Determines how many task rows given a certain VO have a queued state and their deferred start - * time is elapsed - * - * @return @throws DataAccessException - */ - public int getReadyForTakeOver(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getReadyForTakeOver(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks ready for the take-over. 
DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * @param taskId @param requestToken @return @throws DataAccessException */ - public TapeRecallTO getTask(UUID taskId, String requestToken) throws DataAccessException { + public Optional getTask(UUID taskId, String requestToken) + throws DataAccessException { return tapeRecallDAO.getTask(taskId, requestToken); } @@ -268,48 +214,6 @@ public List getAllInProgressTasks(int numberOfTaks) { return taskList; } - /** - * @return - */ - public TapeRecallTO takeoverTask() { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(); - } catch (DataAccessException e) { - log.error("Unable to takeove a task.", e); - } - return task; - } - - /** - * @param voName @return - */ - public TapeRecallTO takeoverTask(String voName) { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(voName); - } catch (DataAccessException e) { - log.error("Unable to takeover a task for vo {}", voName, e); - } - return task; - } - - /** - * @param numberOfTaks @param voName @return - */ - public List takeoverTasks(int numberOfTaks, String voName) { - - List taskList = Lists.newArrayList(); - try { - taskList.addAll(tapeRecallDAO.takeoverTasksWithDoubles(numberOfTaks, voName)); - } catch (DataAccessException e) { - log.error("Unable to takeover {} tasks for vo {}", numberOfTaks, voName, e); - } - return taskList; - } - /** * Method used by PtGChunk and BoLChunk to request the recall of a file * @@ -442,4 +346,5 @@ private void updateChuncksStatus(Collection chunkBucket, } } + } diff --git a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java deleted file mode 100644 index 205e25273..000000000 --- a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import java.util.Iterator; -import java.util.List; -import java.util.ArrayList; -import it.grid.storm.namespace.model.Protocol; - -/** - * Package private auxiliary class used to convert between the DB raw data - * representation and StoRM s Object model list of transfer protocols. - * - */ - -class TransferProtocolListConverter { - - /** - * Method that returns a List of Uppercase Strings used in the DB to represent - * the given TURLPrefix. An empty List is returned in case the conversion does - * not succeed, a null TURLPrefix is supplied, or its size is 0. - */ - public static List toDB(TURLPrefix turlPrefix) { - - List result = new ArrayList(); - Protocol protocol; - for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it - .hasNext();) { - protocol = it.next(); - result.add(protocol.getSchema()); - } - return result; - } - - /** - * Method that returns a TURLPrefix of transfer protocol. If the translation - * cannot take place, a TURLPrefix of size 0 is returned. Likewise if a null - * List is supplied. 
- */ - public static TURLPrefix toSTORM(List listOfProtocol) { - - TURLPrefix turlPrefix = new TURLPrefix(); - Protocol protocol = null; - for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { - protocol = Protocol.getProtocol(i.next()); - if (!(protocol.equals(Protocol.UNKNOWN))) - turlPrefix.addProtocol(protocol); - } - return turlPrefix; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java index 149280773..97f748ae0 100644 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java @@ -4,18 +4,6 @@ */ package it.grid.storm.catalogs; -import it.grid.storm.acl.AclManager; -import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemPermission; -import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.srm.types.TLifeTimeInSeconds; - -import java.util.ArrayList; import java.util.Calendar; import java.util.Collection; import java.util.Iterator; @@ -26,41 +14,50 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + +import it.grid.storm.acl.AclManager; +import it.grid.storm.acl.AclManagerFS; +import it.grid.storm.common.types.PFN; +import it.grid.storm.common.types.TimeUnit; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.filesystem.FilesystemPermission; +import it.grid.storm.filesystem.LocalFile; +import it.grid.storm.griduser.LocalUser; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.impl.mysql.VolatileAndJiTDAOMySql; +import it.grid.storm.persistence.model.JiTData; +import 
it.grid.storm.srm.types.TLifeTimeInSeconds; + /** - * This catalog holds all info needed to pin files for JiT ACL tracking, and - * for keeping track of Volatile files. pinLifetime is the time Jit ACLs will be - * in place: upon expiry ACLs are removed; fileLifetime is the time Volatile - * files will remain in the system: upon expiry those files are removed. In - * particular the srmPrepareToPut analyzes the request and if the specified file - * is set to Volatile, then it calls on the catalog to add the corresponding - * entry for the given fileLifetime. If StoRM is configured for JiT, another - * method is invoked to add an entry to keep track of the ACLs for the desired - * pinLifetime. For srmPrepareToGet, only if StoRM is configured for JiT ACLs - * then a method is invoked to add the corresponding entry for the given - * pinLifetime. Repeatedly putting the same Volatile file, will overwrite - * existing fileLifetime only if the overwrite option allows file overwriting. - * If JiT is enabled and it is a new user that is putting again the same file - * in, a new pinLifetime entry is added; but if it is the same user, the - * pinLifetime WILL be changed provided the new expiry exceeds the current one! - * Repeatedly invoking PtG on the same file behaves similarly: different users - * will have their own pinLifetime record, but the same user WILL change the - * pinLifetime provided the new expiry exceeds the current one! In case the - * pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. - * This may occur when a file is Put and defined Volatile, but with a - * pinLifetime that is longer than that of the pin. Or if _subsequent_ calls to - * PtG specify a pinLifetime that lasts longer. To be more precise, the - * pinLifetime gets recorded as requested, but upon expiry of the volatile entry - * any associated acl will get removed as well, regardless of the acl expiry. 
- * When lifetime expires: volatile files get erased from the system and their - * entries in the catalog are removed; tracked ACLs get removed from the files - * WITHOUT erasing the files, and their entries in the catalog are removed; - * finally for Volatile files with ACLs set up on them, the ACLs are removed AND - * the files are erased, also cleaning up the catalog. As a last note, the - * catalog checks periodically its entries for any expired ones, and then - * proceeds with purging; this frequency of cleaning is specified in a - * configuration parameter, and the net effect is that the pinning/volatile may - * actually last longer (but never less) because the self cleaning mechanism is - * active only at those predetermined times. + * This catalog holds all info needed to pin files for JiT ACL tracking, and for keeping track of + * Volatile files. pinLifetime is the time JiT ACLs will be in place: upon expiration ACLs are removed; + * fileLifetime is the time Volatile files will remain in the system: upon expiration those files are + * removed. In particular the srmPrepareToPut analyzes the request and if the specified file is set + * to Volatile, then it calls on the catalog to add the corresponding entry for the given + * fileLifetime. If StoRM is configured for JiT, another method is invoked to add an entry to keep + * track of the ACLs for the desired pinLifetime. For srmPrepareToGet, only if StoRM is configured + * for JiT ACLs then a method is invoked to add the corresponding entry for the given pinLifetime. + * Repeatedly putting the same Volatile file, will overwrite existing fileLifetime only if the + * overwrite option allows file overwriting. If JiT is enabled and it is a new user that is putting + * again the same file in, a new pinLifetime entry is added; but if it is the same user, the + * pinLifetime WILL be changed provided the new expiration exceeds the current one! 
Repeatedly invoking + * PtG on the same file behaves similarly: different users will have their own pinLifetime record, + * but the same user WILL change the pinLifetime provided the new expiration exceeds the current one! In + * case the pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. This may + * occur when a file is Put and defined Volatile, but with a pinLifetime that is longer than that of + * the pin. Or if _subsequent_ calls to PtG specify a pinLifetime that lasts longer. To be more + * precise, the pinLifetime gets recorded as requested, but upon expiration of the volatile entry any + * associated ACL will get removed as well, regardless of the ACL expiration. When lifetime expires: + * volatile files get erased from the system and their entries in the catalog are removed; tracked + * ACLs get removed from the files WITHOUT erasing the files, and their entries in the catalog are + * removed; finally for Volatile files with ACLs set up on them, the ACLs are removed AND the files + * are erased, also cleaning up the catalog. As a last note, the catalog checks periodically its + * entries for any expired ones, and then proceeds with purging; this frequency of cleaning is + * specified in a configuration parameter, and the net effect is that the pinning/volatile may + * actually last longer (but never less) because the self cleaning mechanism is active only at those + * predetermined times. * * @author EGRID - ICTP Trieste * @version 2.0 @@ -68,536 +65,486 @@ */ public class VolatileAndJiTCatalog { - private static final Logger log = LoggerFactory - .getLogger(VolatileAndJiTCatalog.class); - - /** only instance of Catalog! */ - private static final VolatileAndJiTCatalog cat = new VolatileAndJiTCatalog(); - /** only instance of DAO object! */ - private static final VolatileAndJiTDAO dao = VolatileAndJiTDAO.getInstance(); - /** Timer object in charge of cleaning periodically the Catalog! 
*/ - private final Timer cleaner = new Timer(); - /** Delay time before starting cleaning thread! Set to 1 minute */ - private final long delay = Configuration.getInstance() - .getCleaningInitialDelay() * 1000; - /** Period of execution of cleaning! Set to 1 hour */ - private final long period = Configuration.getInstance() - .getCleaningTimeInterval() * 1000; - /** fileLifetime to use if user specified a non-positive value */ - private final long defaultFileLifetime = Configuration.getInstance() - .getFileLifetimeDefault(); - /** Number of seconds to use as default if the supplied lifetime is zero! */ - private final long floor = Configuration.getInstance() - .getPinLifetimeDefault(); - /** - * Maximum number of seconds that an ACL can live: the life time requested by - * the user cannot be greater than this value! This ceiling is needed because - * of the cron job that removes pool account mappings: when the mapping is - * removed, there must NOT be ANY ACL for that pool-user left! - */ - private final long ceiling = Configuration.getInstance() - .getPinLifetimeMaximum(); - - /** - * Private constructor that starts the cleaning timer. - */ - private VolatileAndJiTCatalog() { - - TimerTask cleaningTask = new TimerTask() { - - @Override - public void run() { - - purge(); - } - }; - cleaner.scheduleAtFixedRate(cleaningTask, delay, period); - } - - /** - * Method that returns the only instance of PinnedFilesCatalog. - */ - public static VolatileAndJiTCatalog getInstance() { - - return cat; - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - synchronized public boolean exists(PFN pfn) { - - return dao.exists(pfn.getValue()); - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtG operation. 
The method is intended to be used by code - * handling srmAbort command. Notice that the Traverse on the parents is NOT - * removed! This is to accomodate for the use case of a user that has run many - * PtG on different SURLs but all contained in the same directory tree! In - * practice this method removes the R permission. If any entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise - * entries get their start time set to now, and the lifetime set to zero; in - * case more than one matching entry is found, a message gets written to the - * logs, and the updating continues anyway as explained. At this point, when - * the garbage collector wakes up the entries get cleanly handled (physical - * ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be - * forced by invoking directly the purge mehod. The method returns FALSE in - * case an entry was not found or the supplied parameters were null, and TRUE - * otherwise. Yet keep in mind that it says nothing of whether the DB - * operation was successful or not. - */ - synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read); - } - log.error("VolatileAndJiT CATALOG: programming bug! expireGetJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method used to expire an entry in the JiT catalogue. The method is intended - * to be used by code handling srmAbort command. If the entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise the - * entry gets its start time set to now, and its lifetime set to zero; in case - * more than one matching entry is found, a message gets written to the logs, - * and the updating continues anyway as explained. 
At this point, when the - * garbage collector wakes up the entry is cleanly handled (physical ACL is - * removed, catalog entry removed, etc.); or an earlier cleaning can be forced - * by invoking directly the purge method. The method returns FALSE in case no - * entry was found or the supplied parameters were null, and TRUE otherwise. - * Yet keep in mind that is says nothing of whether the DB operation was - * successful or not. - */ - synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl) { - - if (pfn != null && localUser != null && acl != null) { - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int intacl = acl.getInt(); - // from the current time we remove 10 seconds because it was observed - // that when executing purge right after invoking this method, less - // than 1 second elapses, so no purging takes place at all since expiry - // is not yet reached! - // Seconds needed and not milliseconds! - long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; - long pinTime = 0; // set to zero the lifetime! - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " - + "{})!", fileName, uid, intacl); - return false; - } - dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " - + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); - } - return true; - } - log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " - + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); - return false; - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtP operation. The method is intended to be used by code - * handling srmAbort command, and by srmPutDone. Notice that the Traverse on - * the parents is NOT removed! 
This is to accomodate for the use case of a - * user that has run many PtP on different SURLs but that are all contained in - * the same directory tree! In practice, this method removes R and W - * permissions. If any entry does not exist, then nothing happens and a - * warning gets written in the logs; otherwise entries get their start time - * set to now, and the lifetime set to zero; in case more than one matching - * entry is found, a message gets written to the logs, and the updating - * continues anyway as explained. At this point, when the garbage collector - * wakes up the entries get cleanly handled (physical ACL is removed, catalog - * entry removed, etc.); or an earlier cleaning can be forced by invoking - * directly the purge mehod. The method returns FALSE in case an entry was not - * found or the supplied parameters were null, and TRUE otherwise. Yet keep in - * mind that is says nothing of whether the DB operation was successful or - * not. - */ - synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read) - && expireJiT(pfn, localUser, FilesystemPermission.Write); - } - - log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method that purges the catalog, removing expired ACLs and deleting expired - * Volatile files. When Volatile entries expire, any realted JiT will - * automatically expire too, regardless of the specified pinLifetime: that is, - * fileLifetime wins over pinLifetime. WARNING! Notice that the catalogue DOES - * get cleaned up even if the physical removal of the ACL or erasing of the - * file fails. - */ - public synchronized void purge() { - - log.debug("VolatileAndJiT CATALOG! 
Executing purge!"); - Calendar rightNow = Calendar.getInstance(); - /** - * removes all expired entries from storm_pin and storm_track, returning two - * Collections: one with the PFN of Volatile files, and the other with PFN + - * GridUser couple of the entries that were just being tracked for the ACLs - * set up on them. - */ - Collection[] expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); - Collection expiredVolatile = expired[0]; - Collection expiredJiT = expired[1]; - if (expiredVolatile.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "Volatile entries:\n {}", volatileString(expired[0])); - } - if (expiredJiT.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No JiT entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "JiT ACLs entries:\n {}", jitString(expired[1])); - } - // Remove ACLs - JiTData aux = null; - for (Iterator i = expiredJiT.iterator(); i.hasNext();) { - aux = (JiTData) i.next(); - int jitacl = aux.acl(); - String jitfile = aux.pfn(); - int jituid = aux.uid(); - int jitgid = aux.gid(); - try { - log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " - + "user {},{}", jitacl, jitfile, jituid, jitgid); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); - LocalUser auxUser = new LocalUser(jituid, jitgid); - FilesystemPermission auxACL = new FilesystemPermission(jitacl); - - AclManager manager = AclManagerFS.getInstance(); - if (auxFile == null) { - log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " - + "LocalFile is null!"); - } else { - try { - manager.revokeUserPermission(auxFile, auxUser, auxACL); - } catch (IllegalArgumentException e) { - log.error("Unable to revoke user permissions on the file. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical ACL {} for user {}, could NOT be removed from {}", - jitacl, jituid, jitgid, jitfile); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - // Delete files - String auxPFN = null; - for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { - auxPFN = (String) i.next(); - try { - log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); - boolean ok = auxFile.delete(); - if (!ok) { - throw new Exception("Java File deletion failed!"); - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical file {} could NOT be deleted!", auxPFN); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - } - - /** - * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in - * the DB table, related to the given PFN; Notice that _no_ distinction is - * made aboutthe specific user! This is because upon expiry of - * SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are - * automatically erased. This implies that all catalogue entries get removed. - * If no entries are present nothing happens. - */ - public synchronized void removeAllJiTsOn(PFN pfn) { - - if (pfn != null) { - dao.removeAllJiTsOn(pfn.getValue()); - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " - + "invoked on null pfn!"); - } - - /** - * Method used to remove a Volatile entry that matches the supplied pfn, from - * the DB. If null is supplied, an error message gets logged and nothing - * happens. If PFN is not found, nothing happens and _no_ message gets logged. 
- */ - public synchronized void removeVolatile(PFN pfn) { - - if (pfn != null) { - dao.removeVolatile(pfn.getValue()); - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! removeVolatile invoked " - + "on null pfn!"); - } - - /** - * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the - * LocalUser, the ACL and the desired pinLifeTime. If the 3-ple (PFN, ACL, - * LocalUser) is not present, it gets added; if it is already present, - * provided the new desired expiry occurs after the present one, it gets - * changed. If the supplied lifetime is zero, then a default value is used - * instead. If it is larger than a ceiling, that ceiling is used instead. The - * floor value in seconds can be set from the configuration file, with the - * property: pinLifetime.minimum While the ceiling value in seconds is set - * with: pinLifetime.maximum BEWARE: The intended use case is in both - * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security - * mechanism. The maximum is necessary because JiT ACLs cannot last longer - * than the amount of time the pool account is leased. Notice that for - * Volatile entries, a pinLifetime larger than the fileLifetime can be - * specified. However, when Volatile files expire any related JiTs - * automatically expire in anticipation! - */ - public synchronized void trackJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl, Calendar start, TLifeTimeInSeconds pinLifetime) { - - if (pfn != null && localUser != null && acl != null && start != null - && pinLifetime != null) { - - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int gid = localUser.getPrimaryGid(); - int intacl = acl.getInt(); - // seconds needed and not milliseconds! 
- long pinStart = start.getTimeInMillis() / 1000; - long pinTime = validatePinLifetime(pinLifetime.value()); - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); - } else { - dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for " - + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, - intacl); - } - } - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " - + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", - pfn, localUser, acl, start, pinLifetime); - } - - /** - * Method that adds an entry to the catalog that keeps track of Volatile - * files. The PFN and the fileLifetime are needed. If no entry corresponding - * to the given PFN is found, a new one gets recorded. If the PFN is already - * present, then provided the new expiry (obtained by adding together - * current-time and requested-lifetime) exceeds the expiry in the catalog, - * the entry is updated. Otherwise nothing takes place. If the supplied - * fileLifetime is zero, then a default value is used instead. This floor - * default value in seconds can be set from the configuration file, with the - * property: fileLifetime.default BEWARE: The intended use case for this - * method is during srmPrepareToPut. When files are uploaded into StoRM, they - * get specified as Volatile or Permanent. The PtP logic determines if the - * request is for a Volatile file and in that case it adds a new entry in the - * catalog. That is the purpose of this method. Any subsequent PtP call will - * just result in a modification of the expiry, provided the newer one lasts - * longer than the original one. Yet bear in mind that two or more PtP on the - * same file makes NO SENSE AT ALL! If any DB error occurs, then nothing gets - * added/updated and an error message gets logged. 
- */ - public synchronized void trackVolatile(PFN pfn, Calendar start, - TLifeTimeInSeconds fileLifetime) { - - if (pfn != null && fileLifetime != null && start != null) { - - String fileName = pfn.getValue(); - long fileTime = fileLifetime.value(); - if (fileTime <= 0) { - fileTime = defaultFileLifetime; - } - long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not - // milliseconds! - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! Volatile entry NOT processed!", - pfn); - } else if (n == 0) { - dao.addVolatile(fileName, fileStart, fileTime); - } else { - dao.updateVolatile(fileName, fileStart, fileTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, - fileLifetime); - } - - public synchronized void setStartTime(PFN pfn, Calendar start) - throws Exception { - - if (pfn == null || start == null) { - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={}", pfn, start); - return; - } - - String fileName = pfn.getValue(); - // seconds needed and not milliseconds! - long fileStart = start.getTimeInMillis() / 1000; - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! 
Volatile entry NOT processed!", - pfn); - return; - } - if (n == 0) { - throw new Exception("Unable to update row volatile for pfn \'" + pfn - + "\' , not on the database!"); - } - dao.updateVolatile(fileName, fileStart); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - - /** - * Method that returns a List whose first element is a Calendar with the - * starting date and time of the lifetime of the supplied PFN, and whose - * second element is the TLifeTime the system is keeping the PFN. If no entry - * is found for the given PFN, an empty List is returned. Likewise if any DB - * error occurs. In any case, proper error messages get logged. Moreover - * notice that if for any reason the value for the Lifetime read from the DB - * does not allow creation of a valid TLifeTimeInSeconds, an Empty one is - * returned. Error messages in logs warn of the situation. - */ - public synchronized List volatileInfoOn(PFN pfn) { - - ArrayList aux = new ArrayList(); - if (pfn == null) { - log.error("VolatileAndJiT CATALOG: programming bug! volatileInfoOn " - + "invoked on null PFN!"); - return aux; - } - Collection c = dao.volatileInfoOn(pfn.getValue()); - if (c.size() != 2) { - return aux; - } - Iterator i = c.iterator(); - // start time - long startInMillis = i.next().longValue() * 1000; - Calendar auxcal = Calendar.getInstance(); - auxcal.setTimeInMillis(startInMillis); - aux.add(auxcal); - // lifeTime - long lifetimeInSeconds = ((Long) i.next()).longValue(); - TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); - try { - auxLifeTime = TLifeTimeInSeconds - .make(lifetimeInSeconds, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - log.error("VolatileAndJiT CATALOG: programming bug! Retrieved long does " - + "not allow TLifeTimeCreation! 
long is: {}; error is: {}", - lifetimeInSeconds, e.getMessage(), e); - } - aux.add(auxLifeTime); - return aux; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of JiTData. - */ - private String jitString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - sb.append("file,acl,uid,gid\n"); - JiTData aux = null; - for (Iterator i = c.iterator(); i.hasNext();) { - aux = i.next(); - sb.append(aux.pfn()); - sb.append(","); - sb.append(aux.acl()); - sb.append(","); - sb.append(aux.uid()); - sb.append(","); - sb.append(aux.gid()); - if (i.hasNext()) { - sb.append("\n"); - } - } - return sb.toString(); - } - - /** - * Private method that makes sure that the lifeTime of the request: (1) It is - * not less than a predetermined value: this check is needed because clients - * may omit to supply a value and some default one must be used; moreover, it - * is feared that if the requested lifetime is very low, such as 0 or a few - * seconds, there could be strange problems in having a file written and - * erased immediately. (2) It is not larger than a given ceiling; this is - * necessary because in the JiT model, the underlying system may decide to - * remove the pool account mappings; it is paramount that no ACLs remain set - * up for the now un-associated pool account. - */ - private long validatePinLifetime(long lifetime) { - - long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime - // set to zero! - duration = duration <= ceiling ? duration : ceiling; // make sure lifetime - // is not longer than - // the maximum set! - return duration; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of pfn Strings. 
- */ - private String volatileString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - return sb.toString(); - } + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTCatalog.class); + + private static VolatileAndJiTCatalog instance; + + public static synchronized VolatileAndJiTCatalog getInstance() { + if (instance == null) { + instance = new VolatileAndJiTCatalog(); + } + return instance; + } + + private final VolatileAndJiTDAO dao; + + /** Timer object in charge of cleaning periodically the Catalog! */ + private final Timer cleaner = new Timer(); + /** Delay time before starting cleaning thread! Set to 1 minute */ + private final long delay = StormConfiguration.getInstance().getCleaningInitialDelay() * 1000; + /** Period of execution of cleaning! Set to 1 hour */ + private final long period = StormConfiguration.getInstance().getCleaningTimeInterval() * 1000; + /** fileLifetime to use if user specified a non-positive value */ + private final long defaultFileLifetime = StormConfiguration.getInstance().getFileLifetimeDefault(); + /** Number of seconds to use as default if the supplied lifetime is zero! */ + private final long floor = StormConfiguration.getInstance().getPinLifetimeDefault(); + /** + * Maximum number of seconds that an ACL can live: the life time requested by the user cannot be + * greater than this value! This ceiling is needed because of the cron job that removes pool + * account mappings: when the mapping is removed, there must NOT be ANY ACL for that pool-user + * left! + */ + private final long ceiling = StormConfiguration.getInstance().getPinLifetimeMaximum(); + + /** + * Private constructor that starts the cleaning timer. 
+ */ + private VolatileAndJiTCatalog() { + + dao = VolatileAndJiTDAOMySql.getInstance(); + + TimerTask cleaningTask = new TimerTask() { + + @Override + public void run() { + + purge(); + } + }; + cleaner.scheduleAtFixedRate(cleaningTask, delay, period); + } + + /** + * Checks whether the given file exists in the volatile table or not. + * + * @param filename + * @return true if there is antry for the given file in the volatilte table, + * false otherwise. + */ + synchronized public boolean exists(PFN pfn) { + + return dao.exists(pfn.getValue()); + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtG + * operation. The method is intended to be used by code handling srmAbort command. Notice that the + * Traverse on the parents is NOT removed! This is to accomodate for the use case of a user that + * has run many PtG on different SURLs but all contained in the same directory tree! In practice + * this method removes the R permission. If any entry does not exist, then nothing happens and a + * warning gets written in the logs; otherwise entries get their start time set to now, and the + * lifetime set to zero; in case more than one matching entry is found, a message gets written to + * the logs, and the updating continues anyway as explained. At this point, when the garbage + * collector wakes up the entries get cleanly handled (physical ACL is removed, catalog entry + * removed, etc.); or an earlier cleaning can be forced by invoking directly the purge mehod. The + * method returns FALSE in case an entry was not found or the supplied parameters were null, and + * TRUE otherwise. Yet keep in mind that it says nothing of whether the DB operation was + * successful or not. + */ + synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read); + } + log.error("VolatileAndJiT CATALOG: programming bug! 
expireGetJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method used to expire an entry in the JiT catalogue. The method is intended to be used by code + * handling srmAbort command. If the entry does not exist, then nothing happens and a warning gets + * written in the logs; otherwise the entry gets its start time set to now, and its lifetime set + * to zero; in case more than one matching entry is found, a message gets written to the logs, and + * the updating continues anyway as explained. At this point, when the garbage collector wakes up + * the entry is cleanly handled (physical ACL is removed, catalog entry removed, etc.); or an + * earlier cleaning can be forced by invoking directly the purge method. The method returns FALSE + * in case no entry was found or the supplied parameters were null, and TRUE otherwise. Yet keep + * in mind that is says nothing of whether the DB operation was successful or not. + */ + synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl) { + + if (pfn != null && localUser != null && acl != null) { + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int intacl = acl.getInt(); + // from the current time we remove 10 seconds because it was observed + // that when executing purge right after invoking this method, less + // than 1 second elapses, so no purging takes place at all since expiry + // is not yet reached! + // Seconds needed and not milliseconds! + long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; + long pinTime = 0; // set to zero the lifetime! 
+ int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " + "{})!", + fileName, uid, intacl); + return false; + } + dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " + + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + return true; + } + log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " + + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); + return false; + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtP + * operation. The method is intended to be used by code handling srmAbort command, and by + * srmPutDone. Notice that the Traverse on the parents is NOT removed! This is to accomodate for + * the use case of a user that has run many PtP on different SURLs but that are all contained in + * the same directory tree! In practice, this method removes R and W permissions. If any entry + * does not exist, then nothing happens and a warning gets written in the logs; otherwise entries + * get their start time set to now, and the lifetime set to zero; in case more than one matching + * entry is found, a message gets written to the logs, and the updating continues anyway as + * explained. At this point, when the garbage collector wakes up the entries get cleanly handled + * (physical ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be forced by + * invoking directly the purge mehod. The method returns FALSE in case an entry was not found or + * the supplied parameters were null, and TRUE otherwise. Yet keep in mind that is says nothing of + * whether the DB operation was successful or not. 
+ */ + synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read) + && expireJiT(pfn, localUser, FilesystemPermission.Write); + } + + log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method that purges the catalog, removing expired ACLs and deleting expired Volatile files. When + * Volatile entries expire, any realted JiT will automatically expire too, regardless of the + * specified pinLifetime: that is, fileLifetime wins over pinLifetime. WARNING! Notice that the + * catalogue DOES get cleaned up even if the physical removal of the ACL or erasing of the file + * fails. + */ + @SuppressWarnings("unchecked") + public synchronized void purge() { + + log.debug("VolatileAndJiT CATALOG! Executing purge!"); + Calendar rightNow = Calendar.getInstance(); + /** + * removes all expired entries from storm_pin and storm_track, returning two Collections: one + * with the PFN of Volatile files, and the other with PFN + GridUser couple of the entries that + * were just being tracked for the ACLs set up on them. + */ + List expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); + List expiredVolatile = (List) expired.get(0); + List expiredJiT = (List) expired.get(1); + if (expiredVolatile.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); + } else { + log.info("VolatileAndJiT CATALOG! Found and purged the following expired " + + "Volatile entries:\n {}", volatileString(expiredVolatile)); + } + if (expiredJiT.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No JiT entries found."); + } else { + log.info("VolatileAndJiT CATALOG! 
Found and purged the following expired " + + "JiT ACLs entries:\n {}", jitString(expiredJiT)); + } + // Remove ACLs + JiTData aux = null; + for (Iterator i = expiredJiT.iterator(); i.hasNext();) { + aux = (JiTData) i.next(); + int jitacl = aux.acl(); + String jitfile = aux.pfn(); + int jituid = aux.uid(); + int jitgid = aux.gid(); + try { + log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " + "user {},{}", jitacl, + jitfile, jituid, jitgid); + LocalFile auxFile = + Namespace.getInstance().resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); + LocalUser auxUser = new LocalUser(jituid, jitgid); + FilesystemPermission auxACL = new FilesystemPermission(jitacl); + + AclManager manager = AclManagerFS.getInstance(); + if (auxFile == null) { + log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " + "LocalFile is null!"); + } else { + try { + manager.revokeUserPermission(auxFile, auxUser, auxACL); + } catch (IllegalArgumentException e) { + log.error( + "Unable to revoke user permissions on the file. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + } + } catch (Exception e) { + log.error( + "VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical ACL {} for user {}, could NOT be removed from {}", + jitacl, jituid, jitgid, jitfile); + log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); + } + } + // Delete files + String auxPFN = null; + for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { + auxPFN = (String) i.next(); + try { + log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); + LocalFile auxFile = + Namespace.getInstance().resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); + boolean ok = auxFile.delete(); + if (!ok) { + throw new Exception("Java File deletion failed!"); + } + } catch (Exception e) { + log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical file {} could NOT be deleted!", auxPFN); + log.error("VolatileAndJiT CATALOG! 
{}", e.getMessage(), e); + } + } + } + + /** + * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in the DB table, + * related to the given PFN; Notice that _no_ distinction is made aboutthe specific user! This is + * because upon expiry of SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are + * automatically erased. This implies that all catalogue entries get removed. If no entries are + * present nothing happens. + */ + public synchronized void removeAllJiTsOn(PFN pfn) { + + if (pfn != null) { + dao.removeAllJiTsOn(pfn.getValue()); + return; + } + log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " + "invoked on null pfn!"); + } + + /** + * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the LocalUser, the ACL + * and the desired pinLifeTime. If the 3-ple (PFN, ACL, LocalUser) is not present, it gets added; + * if it is already present, provided the new desired expiry occurs after the present one, it gets + * changed. If the supplied lifetime is zero, then a default value is used instead. If it is + * larger than a ceiling, that ceiling is used instead. The floor value in seconds can be set from + * the configuration file, with the property: pinLifetime.minimum While the ceiling value in + * seconds is set with: pinLifetime.maximum BEWARE: The intended use case is in both + * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security mechanism. The maximum + * is necessary because JiT ACLs cannot last longer than the amount of time the pool account is + * leased. Notice that for Volatile entries, a pinLifetime larger than the fileLifetime can be + * specified. However, when Volatile files expire any related JiTs automatically expire in + * anticipation! 
+ */ + public synchronized void trackJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl, + Calendar start, TLifeTimeInSeconds pinLifetime) { + + if (pfn != null && localUser != null && acl != null && start != null && pinLifetime != null) { + + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int gid = localUser.getPrimaryGid(); + int intacl = acl.getInt(); + // seconds needed and not milliseconds! + long pinStart = start.getTimeInMillis() / 1000; + long pinTime = validatePinLifetime(pinLifetime.value()); + int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); + } else { + dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for " + + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + } + return; + } + log.error( + "VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " + + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", + pfn, localUser, acl, start, pinLifetime); + } + + /** + * Method that adds an entry to the catalog that keeps track of Volatile files. The PFN and the + * fileLifetime are needed. If no entry corresponding to the given PFN is found, a new one gets + * recorded. If the PFN is already present, then provided the new expiry (obtained by adding + * together current-time and requested-lifetime) exceeds the expiry in the catalog, the entry is + * updated. Otherwise nothing takes place. If the supplied fileLifetime is zero, then a default + * value is used instead. This floor default value in seconds can be set from the configuration + * file, with the property: fileLifetime.default BEWARE: The intended use case for this method is + * during srmPrepareToPut. When files are uploaded into StoRM, they get specified as Volatile or + * Permanent. 
The PtP logic determines if the request is for a Volatile file and in that case it + * adds a new entry in the catalog. That is the purpose of this method. Any subsequent PtP call + * will just result in a modification of the expiry, provided the newer one lasts longer than the + * original one. Yet bear in mind that two or more PtP on the same file makes NO SENSE AT ALL! If + * any DB error occurs, then nothing gets added/updated and an error message gets logged. + */ + public synchronized void trackVolatile(PFN pfn, Calendar start, TLifeTimeInSeconds fileLifetime) { + + if (pfn != null && fileLifetime != null && start != null) { + + String fileName = pfn.getValue(); + long fileTime = fileLifetime.value(); + if (fileTime <= 0) { + fileTime = defaultFileLifetime; + } + long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not + // milliseconds! + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + } else if (n == 0) { + dao.addVolatile(fileName, fileStart, fileTime); + } else { + dao.updateVolatile(fileName, fileStart, fileTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + return; + } + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, fileLifetime); + } + + public synchronized void setStartTime(PFN pfn, Calendar start) throws Exception { + + if (pfn == null || start == null) { + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={}", pfn, start); + return; + } + + String fileName = pfn.getValue(); + // seconds needed and not milliseconds! 
+ long fileStart = start.getTimeInMillis() / 1000; + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + return; + } + if (n == 0) { + throw new Exception( + "Unable to update row volatile for pfn \'" + pfn + "\' , not on the database!"); + } + dao.updateVolatile(fileName, fileStart); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + + /** + * Method that returns a List whose first element is a Calendar with the starting date and time of + * the lifetime of the supplied PFN, and whose second element is the TLifeTime the system is + * keeping the PFN. If no entry is found for the given PFN, an empty List is returned. Likewise if + * any DB error occurs. In any case, proper error messages get logged. Moreover notice that if for + * any reason the value for the Lifetime read from the DB does not allow creation of a valid + * TLifeTimeInSeconds, an Empty one is returned. Error messages in logs warn of the situation. + */ + public synchronized List volatileInfoOn(PFN pfn) { + + List aux = Lists.newArrayList(); + if (pfn == null) { + log + .error("VolatileAndJiT CATALOG: programming bug! 
volatileInfoOn " + "invoked on null PFN!"); + return aux; + } + Collection c = dao.volatileInfoOn(pfn.getValue()); + if (c.size() != 2) { + return aux; + } + Iterator i = c.iterator(); + // start time + long startInMillis = i.next().longValue() * 1000; + Calendar auxcal = Calendar.getInstance(); + auxcal.setTimeInMillis(startInMillis); + aux.add(auxcal); + // lifeTime + long lifetimeInSeconds = ((Long) i.next()).longValue(); + TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); + try { + auxLifeTime = TLifeTimeInSeconds.make(lifetimeInSeconds, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + log.error( + "VolatileAndJiT CATALOG: programming bug! Retrieved long does " + + "not allow TLifeTimeCreation! long is: {}; error is: {}", + lifetimeInSeconds, e.getMessage(), e); + } + aux.add(auxLifeTime); + return aux; + } + + /** + * Private method used to return a String representation of the expired entries Collection of + * JiTData. + */ + private String jitString(Collection c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + sb.append("file,acl,uid,gid\n"); + JiTData aux = null; + for (Iterator i = c.iterator(); i.hasNext();) { + aux = i.next(); + sb.append(aux.pfn()); + sb.append(","); + sb.append(aux.acl()); + sb.append(","); + sb.append(aux.uid()); + sb.append(","); + sb.append(aux.gid()); + if (i.hasNext()) { + sb.append("\n"); + } + } + return sb.toString(); + } + + /** + * Private method that makes sure that the lifeTime of the request: (1) It is not less than a + * predetermined value: this check is needed because clients may omit to supply a value and some + * default one must be used; moreover, it is feared that if the requested lifetime is very low, + * such as 0 or a few seconds, there could be strange problems in having a file written and erased + * immediately. 
(2) It is not larger than a given ceiling; this is necessary because in the JiT + * model, the underlying system may decide to remove the pool account mappings; it is paramount + * that no ACLs remain set up for the now un-associated pool account. + */ + private long validatePinLifetime(long lifetime) { + + long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime + // set to zero! + duration = duration <= ceiling ? duration : ceiling; // make sure lifetime + // is not longer than + // the maximum set! + return duration; + } + + /** + * Private method used to return a String representation of the expired entries Collection of pfn + * Strings. + */ + private String volatileString(List c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java deleted file mode 100644 index a117e8054..000000000 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java +++ /dev/null @@ -1,876 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for VolatileAndJiTCatalog: it has been specifically designed for - * MySQL. - * - * @author EGRID ICTP - * @version 1.0 (based on old PinnedFilesDAO) - * @date November, 2006 - */ -public class VolatileAndJiTDAO { - - private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAO.class); - - // The name of the class for the DB driver - private final String driver = Configuration.getInstance().getDBDriver(); - - // The URL of the DB - private final String url = Configuration.getInstance().getStormDbURL(); - - // The password for the DB - private final String password = Configuration.getInstance().getDBPassword(); - - // The name for the DB - private final String name = Configuration.getInstance().getDBUserName(); - - // Connection to DB - private Connection con = null; - - // instance of DAO - private static final VolatileAndJiTDAO dao = new VolatileAndJiTDAO(); - - // timer thread that will run a task to alert when reconnecting is necessary! - private Timer clock = null; - - // timer task that will update the boolean signaling that a reconnection is needed! 
- private TimerTask clockTask = null; - - // milliseconds that must pass before reconnecting to DB - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - - // initial delay in milliseconds before starting timer - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - // boolean that tells whether reconnection is needed because of MySQL bug! - private boolean reconnect = false; - - private VolatileAndJiTDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of VolatileAndJiTDAO. - */ - public static VolatileAndJiTDAO getInstance() { - - return dao; - } - - /** - * Method that inserts a new entry in the JiT table of the DB, consisting of - * the specified filename, the local user uid, the local user gid, the acl, - * the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970) - * and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addJiT(String filename, int uid, int gid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. 
addJiT: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, gid); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, acl); - logWarnings(stmt.getWarnings()); - stmt.setLong(5, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(6, pinLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in addJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that inserts a new entry in the Volatile table of the DB, consisting - * of the specified filename, the start time as expressed by UNIX epoch - * (seconds since 00:00:00 1 1 1970), and the number of seconds the file must - * be kept for. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addVolatile(String filename, long start, long fileLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. addVolatile: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(3, fileLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in addVolatile: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - public boolean exists(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. exists: unable to get a valid connection!"); - return false; - } - String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; - PreparedStatement stmt = null; - ResultSet rs = null; - boolean result; - - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - - log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - if (rs.next()) { - result = true; - } else { - result = false; - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in existsOnVolatile: {}", - e.getMessage(), e); - result = false; - } finally { - close(rs); - close(stmt); - } - return result; - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * This method _forces_ the update regardless of the fact that the new expiry - * lasts less than the current one! This method is intended to be used by - * expireJiT. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. 
- */ - public void forceUpdateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. forceUpdateJiT: unable to get a valid connection!"); - return; - } - String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setLong(1, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, pinLifetime); - logWarnings(stmt.getWarnings()); - stmt.setString(3, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(5, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns the number of entries in the catalogue, matching the - * given filename, uid and acl. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberJiT(String filename, int uid, int acl) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. numberJiT: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? 
AND acl=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that returns the number of Volatile entries in the catalogue, for - * the given filename. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberVolatile(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. numberVolatile: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberVolatile: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in numberVolatile: {}", - e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that removes all entries in the JiT table of the DB, that match the - * specified filename. So this action takes place _regardless_ of the user - * that set up the ACL! - */ - public void removeAllJiTsOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeAllJiTsOn: unable to get a " - + "valid connection!"); - return; - } - String sql = "DELETE FROM jit WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to remove all expired entries, both of pinned files and of jit - * ACLs. Also, when removing volatile entries, any jit entry that refers to - * those expired volatiles will also be removed. - * - * The method requires a long representing the time measured as UNIX EPOCH - * upon which to base the purging: entries are evaluated expired when compared - * to this date. - * - * The method returns an array of two Collections; Collection[0] contains - * expired volatile entries String PFNs, while Collection[1] contains - * JiTDataTO objects. Collection[1] also contains those entries that may not - * have expired yet, but since the respective Volatile is being removed they - * too must be removed automatically. - * - * WARNING! If any error occurs it gets logged, and an array of two empty - * Collection is returned. This operation is treated as a Transcation by the - * DB, so a Roll Back should return everything to its original state! 
- */ - public Collection[] removeExpired(long time) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeExpired: unable to get a valid connection!"); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - - String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime 0) { - // there are expired volatile entries: adjust jit selection to include - // those SURLs too! - jit = jit + " OR file IN " + makeFileString(volat); - } - stmt = con.prepareStatement(jit); - logWarnings(con.getWarnings()); - stmt.setLong(1, time); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - Collection track = new ArrayList(); - Collection trackid = new ArrayList(); - JiTData aux = null; - while (rs.next()) { - trackid.add(new Long(rs.getLong("ID"))); - aux = new JiTData(rs.getString("file"), rs.getInt("acl"), - rs.getInt("uid"), rs.getInt("gid")); - track.add(aux); - } - int njit = trackid.size(); - close(rs); - close(stmt); - - // remove entries - Collection volcol = new ArrayList(); - Collection jitcol = new ArrayList(); - try { - con.setAutoCommit(false); // begin transaction! - logWarnings(con.getWarnings()); - // delete volatile - int deletedvol = 0; - if (nvolat > 0) { - delvol = delvol + makeIDString(volatid); - stmt = con.prepareStatement(delvol); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - deletedvol = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - // delete jits - int deletedjit = 0; - if (njit > 0) { - deljit = deljit + makeIDString(trackid); - stmt = con.prepareStatement(deljit); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. 
removeExpired: {}", stmt); - deletedjit = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); // end transaction! - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. Removed {} volatile catalogue entries " - + "and {} jit catalogue entries.", deletedvol, deletedjit); - volcol = volat; - jitcol = track; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " - + "rolling back! {}", e.getMessage(), e); - rollback(con); - close(stmt); - } - - // return collections - return new Collection[] { volcol, jitcol }; - } catch (SQLException e) { - close(rs); - close(stmt); - log.error("VolatileAndJiTDAO! Unable to complete removeExpired! {}", - e.getMessage(), e); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - } - - /** - * Method that removes all entries in the Volatile table of the DB, that match - * the specified filename. - */ - public void removeVolatile(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeVolatile: unable to get a valid " - + "connection!"); - return; - } - String sql = "DELETE FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeVolatile: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeVolatile: {} entries removed.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in removeVolatile: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * Entries get updated only if the new expiry calculated by adding start and - * pinLifetime, is larger than the existing one. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. - */ - public void updateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. updateJiT: unable to get a valid " - + "connection!"); - return; - } - String sql = "UPDATE jit " - + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=? AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. volatileInfoOn: unable to get a valid connection!"); - return Lists.newArrayList(); - } - String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - List aux = Lists.newArrayList(); - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - if (rs.next()) { - aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); - aux.add(rs.getLong("fileLifetime")); - } else { - log.debug("VolatileAndJiTDAO! 
infoOnVolatile did not find {}", filename); - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in infoOnVolatile: {}", - e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - return aux; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("VolatileAndJiTDAO: reconnecting to DB. "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that closes a ResultSet and handles all possible - * exceptions. - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close ResultSet - Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that closes a Statement and handles all possible - * exceptions. - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to log warnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - } - } - } - - /** - * Method that returns a String containing all Files. 
- */ - private String makeFileString(Collection files) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = files.iterator(); i.hasNext();) { - sb.append("'"); - sb.append((String) i.next()); - sb.append("'"); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeIDString(Collection rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method used to roll back a transaction and handles all possible - * exceptions. - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("VolatileAndJiTDAO! Roll back successful!"); - } catch (SQLException e3) { - log.error("VolatileAndJiTDAO! Roll back failed! {}", e3.getMessage(), e3); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - response = con.isValid(0); - logWarnings(con.getWarnings()); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in setUpconnection! {}", - e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in takeDownConnection! 
{}", - e.getMessage(), e); - } - } - } -} diff --git a/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java new file mode 100644 index 000000000..538283dbf --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java @@ -0,0 +1,46 @@ +package it.grid.storm.catalogs.executors; + +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +import it.grid.storm.catalogs.executors.threads.BoLFinalizer; +import it.grid.storm.catalogs.executors.threads.PtGFinalizer; +import it.grid.storm.catalogs.executors.threads.PtPFinalizer; +import it.grid.storm.config.StormConfiguration; + +public class RequestFinalizerService { + + private final long delay; + private final long period; + + private ScheduledExecutorService executor; + private PtPFinalizer ptpTask; + private BoLFinalizer bolTask; + private PtGFinalizer ptgTask; + + public RequestFinalizerService(StormConfiguration config) { + + delay = config.getTransitInitialDelay() * 1000L; + period = config.getTransitTimeInterval() * 1000L; + executor = Executors.newScheduledThreadPool(3); + ptpTask = new PtPFinalizer(config.getInProgressPutRequestExpirationTime()); + bolTask = new BoLFinalizer(config.getInProgressBolRequestExpirationTime()); + ptgTask = new PtGFinalizer(); + + } + + public void start() { + + executor.scheduleAtFixedRate(ptpTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(bolTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(ptgTask, delay, period, SECONDS); + + } + + public void stop() { + + executor.shutdown(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java new file mode 100644 index 000000000..5e7229e04 --- /dev/null +++ 
b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java @@ -0,0 +1,51 @@ +package it.grid.storm.catalogs.executors.threads; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; + +public class BoLFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(BoLFinalizer.class); + + private final BoLChunkDAO dao; + private final long inProgressRequestsExpirationTime; + + public BoLFinalizer(long inProgressRequestsExpirationTime) { + + dao = BoLChunkDAOMySql.getInstance(); + this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; + } + + @Override + public void run() { + + log.debug("BoL finalizer started .."); + log.debug("Search for SRM_SUCCESS bol request to be moved to SRM_RELEASED .."); + int nReleased = 0; + try { + nReleased = dao.releaseExpiredAndSuccessfulRequests(); + } catch (Throwable e) { + log.error("{}: {}", e.getClass(), e.getMessage(), e); + } finally { + if (nReleased > 0) { + log.info("Released {} expired and successful BoL requests", nReleased); + } + log.debug("Search for SRM_SUCCESS bol request to be moved to SRM_RELEASED .. DONE"); + } + log.debug("Search for SRM_REQUEST_INPROGRESS bol request to be moved to SRM_ABORTED .."); + int nAborted = 0; + try { + nAborted = dao.releaseExpiredAndSuccessfulRequests(); + } catch (Throwable e) { + log.error("{}: {}", e.getClass(), e.getMessage(), e); + } finally { + if (nAborted > 0) { + log.info("Aborted {} in-progress BoL requests", nReleased); + } + log.debug("Search for SRM_REQUEST_INPROGRESS bol request to be moved to SRM_ABORTED .. 
DONE"); + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java new file mode 100644 index 000000000..2b1c88320 --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java @@ -0,0 +1,47 @@ +package it.grid.storm.catalogs.executors.threads; + + +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.srm.types.TSURL; + + +public class PtGFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtGFinalizer.class); + + private final PtGChunkDAO dao; + + public PtGFinalizer() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + @Override + public void run() { + + log.debug("PtG finalizer started .."); + + try { + + Collection surls = dao.transitExpiredSRM_FILE_PINNED(); + + if (surls.size() > 0) { + log.info("Moved {} expired and successful PtG requests to SRM_FILE_PINNED", surls.size()); + log.debug("Released surls:"); + surls.forEach(surl -> { + log.debug("{}", surl); + }); + } + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java new file mode 100644 index 000000000..5b1f8471e --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java @@ -0,0 +1,88 @@ +package it.grid.storm.catalogs.executors.threads; + + +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import 
it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; + + +public class PtPFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtPFinalizer.class); + + private static final String NAME = "Expired-PutRequests-Agent"; + + private long inProgressRequestsExpirationTime; + private final PtPChunkDAO dao; + + public PtPFinalizer(long inProgressRequestsExpirationTime) { + + this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; + dao = PtPChunkDAOMySql.getInstance(); + log.info("{} created.", NAME); + } + + @Override + public void run() { + + log.debug("{} run.", NAME); + try { + + transitExpiredLifetimeRequests(); + transitExpiredInProgressRequests(); + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + + } + } + + private void transitExpiredLifetimeRequests() { + + Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); + log.debug("{} lifetime-expired requests found ... ", NAME, expiredRequests.size()); + + if (expiredRequests.isEmpty()) { + return; + } + + expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); + + int count = + dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(expiredRequests.keySet()); + log.info("{} updated expired put requests - {} db rows affected", NAME, count); + } + + private void executePutDone(Long id, String surl) { + + try { + + if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { + log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); + } + + } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { + + log.error("{}. 
Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, surl, + e.getMessage(), e); + } + } + + private void transitExpiredInProgressRequests() { + + int count = dao.transitLongTimeInProgressRequestsToStatus(inProgressRequestsExpirationTime, + SRM_FAILURE, "Request timeout"); + log.debug("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java index 3f1463e31..8cd0a351b 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java +++ b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java @@ -6,12 +6,13 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.CopyChunkCatalog; import it.grid.storm.catalogs.PtGChunkCatalog; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.impl.mysql.SURLStatusDAOMySql; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; @@ -26,56 +27,53 @@ public class SURLStatusManagerImpl implements SURLStatusManager { @Override public boolean abortAllGetRequestsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + String explanation) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtGsForSURL(user, surl, explanation); } @Override public boolean abortAllPutRequestsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + String explanation) { - final SURLStatusDAO 
dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtPsForSURL(user, surl, explanation); } @Override - public boolean abortRequest(GridUserInterface user, TRequestToken token, - String explanation) { + public boolean abortRequest(GridUserInterface user, TRequestToken token, String explanation) { RequestSummaryData request = lookupAndCheckRequest(user, token); switch (request.requestType()) { - case PREPARE_TO_GET: - - PtGChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; + case PREPARE_TO_GET: - case PREPARE_TO_PUT: - PtPChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; + PtGChunkCatalog.getInstance() + .updateFromPreviousStatus(token, TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, + explanation); + break; - case BRING_ON_LINE: - BoLChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; + case PREPARE_TO_PUT: + PtPChunkCatalog.getInstance() + .updateFromPreviousStatus(token, TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, + explanation); + break; - case COPY: - CopyChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; + case BRING_ON_LINE: + BoLChunkCatalog.getInstance() + .updateFromPreviousStatus(token, TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, + explanation); + break; - case EMPTY: - break; + case EMPTY: + break; - default: - throw new IllegalArgumentException( - "Abort not supported for request type: " + request.requestType()); + default: + throw new IllegalArgumentException( + "Abort not supported for request type: " + request.requestType()); } @@ -83,8 +81,8 @@ public boolean abortRequest(GridUserInterface user, 
TRequestToken token, } @Override - public boolean abortRequestForSURL(GridUserInterface user, - TRequestToken token, TSURL surl, String explanation) { + public boolean abortRequestForSURL(GridUserInterface user, TRequestToken token, TSURL surl, + String explanation) { RequestSummaryData request = lookupAndCheckRequest(user, token); @@ -109,28 +107,27 @@ public boolean abortRequestForSURL(GridUserInterface user, private void authzCheck(GridUserInterface user, RequestSummaryData request) { if (!request.gridUser().getDn().equals(user.getDn())) { - String errorMsg = String.format("User %s is not authorized to abort " - + "request %s", user.getDn(), request.requestToken()); + String errorMsg = String.format("User %s is not authorized to abort " + "request %s", + user.getDn(), request.requestToken()); throw new AuthzException(errorMsg); } } @Override - public boolean failRequestForSURL(GridUserInterface user, - TRequestToken token, TSURL surl, TStatusCode code, String explanation) { + public boolean failRequestForSURL(GridUserInterface user, TRequestToken token, TSURL surl, + TStatusCode code, String explanation) { RequestSummaryData request = lookupAndCheckRequest(user, token); switch (request.requestType()) { - case PREPARE_TO_PUT: - PtPChunkCatalog.getInstance().updateStatus(token, surl, - TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); - break; + case PREPARE_TO_PUT: + PtPChunkCatalog.getInstance() + .updateStatus(token, surl, TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); + break; - default: - throw new IllegalArgumentException("Unsupported request type: " - + request.requestType()); + default: + throw new IllegalArgumentException("Unsupported request type: " + request.requestType()); } @@ -138,62 +135,59 @@ public boolean failRequestForSURL(GridUserInterface user, } @Override - public Map getPinnedSURLsForUser( - GridUserInterface user, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + List surls) { - final SURLStatusDAO dao = 
new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, surls); } @Override - public Map getPinnedSURLsForUser( - GridUserInterface user, TRequestToken token, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, token, surls); } @Override - public Map getSURLStatuses(GridUserInterface user, - TRequestToken token) { + public Map getSURLStatuses(GridUserInterface user, TRequestToken token) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token); } @Override - public Map getSURLStatuses(GridUserInterface user, - TRequestToken token, - List surls) { + public Map getSURLStatuses(GridUserInterface user, TRequestToken token, + List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token, surls); } @Override public boolean isSURLBusy(TRequestToken requestTokenToExclude, TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, requestTokenToExclude); } @Override public boolean isSURLBusy(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, null); } @Override public boolean isSURLPinned(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtGs(surl); } - private RequestSummaryData lookupAndCheckRequest(GridUserInterface user, - TRequestToken token) { + private RequestSummaryData lookupAndCheckRequest(GridUserInterface 
user, TRequestToken token) { RequestSummaryData request = lookupRequest(token); authzCheck(user, request); @@ -202,12 +196,10 @@ private RequestSummaryData lookupAndCheckRequest(GridUserInterface user, private RequestSummaryData lookupRequest(TRequestToken token) { - RequestSummaryData request = RequestSummaryCatalog.getInstance() - .find(token); + RequestSummaryData request = RequestSummaryCatalog.getInstance().find(token); if (request == null) { - throw new IllegalArgumentException("No request found matching token " - + token); + throw new IllegalArgumentException("No request found matching token " + token); } return request; @@ -216,7 +208,7 @@ private RequestSummaryData lookupRequest(TRequestToken token) { @Override public int markSURLsReadyForRead(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.markSURLsReadyForRead(token, surls); } @@ -224,7 +216,7 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { @Override public void releaseSURLs(GridUserInterface user, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); dao.releaseSURLs(user, surls); } @@ -232,7 +224,7 @@ public void releaseSURLs(GridUserInterface user, List surls) { @Override public void releaseSURLs(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); dao.releaseSURLs(token, surls); } diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java b/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java deleted file mode 100644 index 2d7e12ef7..000000000 --- a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs.timertasks; - -import it.grid.storm.catalogs.PtPChunkDAO; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.TimerTask; - - -public class ExpiredPutRequestsAgent extends TimerTask { - - private static final Logger log = LoggerFactory.getLogger(ExpiredPutRequestsAgent.class); - - private static final String NAME = "Expired-PutRequests-Agent"; - - private long inProgressRequestsExpirationTime; - - public ExpiredPutRequestsAgent(long inProgressRequestsExpirationTime) { - - this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; - log.info("{} created.", NAME); - } - - @Override - public synchronized void run() { - - log.debug("{} run.", NAME); - try { - - transitExpiredLifetimeRequests(); - transitExpiredInProgressRequests(); - - } catch (Exception e) { - - log.error("{}: {}", e.getClass(), e.getMessage(), e); - - } - } - - private void transitExpiredLifetimeRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); - log.debug("{} lifetime-expired requests found ... 
", NAME, expiredRequests.size()); - - if (expiredRequests.isEmpty()) { - return; - } - - expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); - - int count = dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( - expiredRequests.keySet()); - log.info("{} updated expired put requests - {} db rows affected", NAME, count); - } - - private void executePutDone(Long id, String surl) { - - try { - - if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { - log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); - } - - } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { - - log.error("{}. Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, - surl, e.getMessage(), e); - } - } - - private void transitExpiredInProgressRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - List expiredRequestsIds = - dao.getExpiredSRM_REQUEST_INPROGRESS(inProgressRequestsExpirationTime); - log.debug("{} expired in-progress requests found.", expiredRequestsIds.size()); - - if (expiredRequestsIds.isEmpty()) { - return; - } - - int count = dao.transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(expiredRequestsIds); - log.info("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java index c23d77d51..92dc1fb6f 100644 --- a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java +++ b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java @@ -10,168 +10,161 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.RequestSummaryDAO; -import it.grid.storm.config.Configuration; -import 
it.grid.storm.tape.recalltable.TapeRecallCatalog; +import it.grid.storm.catalogs.TapeRecallCatalog; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; public class RequestsGarbageCollector extends TimerTask { - private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); + private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); - private final Configuration config = Configuration.getInstance(); - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - private final PtGChunkCatalog ptgCat = PtGChunkCatalog.getInstance(); - private final BoLChunkCatalog bolCat = BoLChunkCatalog.getInstance(); + private final StormConfiguration config = StormConfiguration.getInstance(); + private final RequestSummaryDAO dao = RequestSummaryDAOMySql.getInstance(); - private Timer handler; - private long delay; + private Timer handler; + private long delay; - public RequestsGarbageCollector(Timer handlerTimer, long delay) { + public RequestsGarbageCollector(Timer handlerTimer, long delay) { - this.delay = delay; - handler = handlerTimer; - } + this.delay = delay; + handler = handlerTimer; + } - @Override - public void run() { + @Override + public void run() { - try { + try { - TGarbageData gd = purgeExpiredRequests(); + TGarbageData gd = purgeExpiredRequests(); - if (gd.getTotalPurged() == 0) { + if (gd.getTotalPurged() == 0) { - log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", - config.getExpiredRequestTime()); + log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", + config.getRequestPurgerPeriod()); - } else { + } else { - log.info( - "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} seconds", - gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), - config.getExpiredRequestTime()); 
+ log.info( + "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} seconds", + gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), + config.getRequestPurgerPeriod()); - } + } - long nextDelay = computeNextDelay(gd); + long nextDelay = computeNextDelay(gd); - if (nextDelay != delay) { + if (nextDelay != delay) { - log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); - delay = nextDelay; + log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); + delay = nextDelay; - } + } - } catch (Exception t) { + } catch (Exception t) { - /* useful to prevent unexpected exceptions that would kill the GC */ - log.error(t.getMessage(), t); + /* useful to prevent unexpected exceptions that would kill the GC */ + log.error(t.getMessage(), t); - } finally { + } finally { - reschedule(); - } - } + reschedule(); + } + } - /** - * Delete from database the completed requests older than a specified and configurable value. - * - * @return A TGarbageData object containing info about the deleted requests - */ - private TGarbageData purgeExpiredRequests() { + /** + * Delete from database the completed requests older than a specified and configurable value. 
+ * + * @return A TGarbageData object containing info about the deleted requests + */ + private TGarbageData purgeExpiredRequests() { - if (!enabled()) { - return TGarbageData.EMPTY; - } + if (!enabled()) { + return TGarbageData.EMPTY; + } - long expirationTime = config.getExpiredRequestTime(); - int purgeSize = config.getPurgeBatchSize(); + long expirationTime = config.getRequestPurgerPeriod(); + int purgeSize = config.getPurgeBatchSize(); - int nRequests = purgeExpiredRequests(expirationTime, purgeSize); - int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); + int nRequests = purgeExpiredRequests(expirationTime, purgeSize); + int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); - return new TGarbageData(nRequests, nRecalls); - } + return new TGarbageData(nRequests, nRecalls); + } - /** - * Check if Garbage Collector is enabled or not. - * - * @return If the purger is enabled. False otherwise. - */ - private boolean enabled() { + /** + * Check if Garbage Collector is enabled or not. + * + * @return If the purger is enabled. False otherwise. + */ + private boolean enabled() { - return config.getExpiredRequestPurging(); - } + return config.getExpiredRequestPurging(); + } - /** - * Method used to purge from db a bunch of completed requests, older than the - * specified @expiredRequestTime. - * - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @param expiredRequestTime The number of seconds after that a request can be considered - * expired - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + /** + * Method used to purge from db a bunch of completed requests, older than the + * specified @expiredRequestTime. 
+ * + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @param expiredRequestTime The number of seconds after that a request can be considered expired + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - ptgCat.transitExpiredSRM_FILE_PINNED(); - bolCat.transitExpiredSRM_SUCCESS(); + return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); - return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); + } - } + /** + * Method used to clear a bunch of completed recall requests from database. + * + * @param expirationTime The number of seconds that must pass before considering a request as + * expired + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { - /** - * Method used to clear a bunch of completed recall requests from database. - * - * @param expirationTime The number of seconds that must pass before considering a request as - * expired - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { + return TapeRecallCatalog.getInstance().purgeCatalog(expirationTime, purgeSize); + } - return new TapeRecallCatalog().purgeCatalog(expirationTime, purgeSize); - } + /** + * Compute a new delay. It will be decreased if the number of purged requests is equal to the + * purge.size value. Otherwise, it will be increased until default value. + * + * @return the computed next interval predicted from last removed requests info + */ + private long computeNextDelay(TGarbageData gd) { - /** - * Compute a new delay. 
It will be decreased if the number of purged requests is equal to the - * purge.size value. Otherwise, it will be increased until default value. - * - * @return the computed next interval predicted from last removed requests info - */ - private long computeNextDelay(TGarbageData gd) { + /* max delay from configuration in milliseconds */ + long maxDelay = config.getRequestPurgerPeriod() * 1000L; + /* min delay accepted in milliseconds */ + long minDelay = 10000L; - /* max delay from configuration in milliseconds */ - long maxDelay = config.getRequestPurgerPeriod() * 1000L; - /* min delay accepted in milliseconds */ - long minDelay = 10000L; + long nextDelay; - long nextDelay; + /* Check purged requests value */ + if (gd.getTotalPurged() >= config.getPurgeBatchSize()) { - /* Check purged requests value */ - if (gd.getTotalPurged() >= config.getPurgeBatchSize()) { + /* bunch size reached: decrease interval */ + nextDelay = Math.max(delay / 2, minDelay); - /* bunch size reached: decrease interval */ - nextDelay = Math.max(delay / 2, minDelay); + } else { - } else { + /* bunch size not reached: increase interval */ + nextDelay = Math.min(delay * 2, maxDelay); - /* bunch size not reached: increase interval */ - nextDelay = Math.min(delay * 2, maxDelay); + } - } + return nextDelay; + } - return nextDelay; - } - - /** - * Schedule another task after @delay milliseconds. - */ - private void reschedule() { - - handler.schedule(new RequestsGarbageCollector(handler, delay), delay); - } + /** + * Schedule another task after @delay milliseconds. 
+ */ + private void reschedule() { + + handler.schedule(new RequestsGarbageCollector(handler, delay), delay); + } } diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/TGarbageData.java b/src/main/java/it/grid/storm/catalogs/timertasks/TGarbageData.java index a6caccfba..ff5048831 100644 --- a/src/main/java/it/grid/storm/catalogs/timertasks/TGarbageData.java +++ b/src/main/java/it/grid/storm/catalogs/timertasks/TGarbageData.java @@ -6,28 +6,28 @@ public class TGarbageData { - private final int nPurgedRequests; - private final int nPurgedRecalls; + private final int nPurgedRequests; + private final int nPurgedRecalls; - public static final TGarbageData EMPTY = new TGarbageData(0, 0); + public static final TGarbageData EMPTY = new TGarbageData(0, 0); - public TGarbageData(int nPurgedRequests, int nPurgedRecalls) { - this.nPurgedRequests = nPurgedRequests; - this.nPurgedRecalls = nPurgedRecalls; - } + public TGarbageData(int nPurgedRequests, int nPurgedRecalls) { + this.nPurgedRequests = nPurgedRequests; + this.nPurgedRecalls = nPurgedRecalls; + } - public int getTotalPurged() { + public int getTotalPurged() { - return nPurgedRequests + nPurgedRecalls; - } + return nPurgedRequests + nPurgedRecalls; + } - public int getTotalPurgedRequests() { + public int getTotalPurgedRequests() { - return nPurgedRequests; - } + return nPurgedRequests; + } - public int getTotalPurgedRecalls() { + public int getTotalPurgedRecalls() { - return nPurgedRecalls; - } + return nPurgedRecalls; + } } \ No newline at end of file diff --git a/src/main/java/it/grid/storm/check/SimpleCheckManager.java b/src/main/java/it/grid/storm/check/SimpleCheckManager.java index 1bf61e8c1..bdab7f6bd 100644 --- a/src/main/java/it/grid/storm/check/SimpleCheckManager.java +++ b/src/main/java/it/grid/storm/check/SimpleCheckManager.java @@ -17,84 +17,80 @@ import it.grid.storm.check.sanity.filesystem.NamespaceFSExtendedACLUsageCheck; import 
it.grid.storm.check.sanity.filesystem.NamespaceFSExtendedAttributeUsageCheck; import it.grid.storm.filesystem.MtabUtil; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.VirtualFS; -/** - * @author Michele Dibenedetto - */ public class SimpleCheckManager extends CheckManager { - private static final Logger log = LoggerFactory - .getLogger(SimpleCheckManager.class); - - /** - * A list of checks to be executed - */ - private List checks = Lists.newArrayList(); - - @Override - protected Logger getLogger() { - - return log; - } - - @Override - protected void loadChecks() { - - /* Add by hand a new element for each requested check */ - try { - checks.add(getNamespaceFSAssociationCheck()); - } catch (IllegalStateException e) { - log.warn("Skipping NamespaceFSAssociationCheck. " - + "IllegalStateException: {}", e.getMessage()); - } - // checks.add(new NamespaceFSExtendedAttributeDeclarationCheck()); Removed - checks.add(new NamespaceFSExtendedAttributeUsageCheck()); - checks.add(new NamespaceFSExtendedACLUsageCheck()); - } - - /** - * - */ - private Check getNamespaceFSAssociationCheck() { - - Map mountPoints; - // load mstab mount points and file system types - try { - mountPoints = MtabUtil.getFSMountPoints(); - } catch (Exception e) { - log.error("Unable to get filesystem mount points. 
Exception: {}", e.getMessage()); - throw new IllegalStateException("Unable to get filesystem mount points"); - } - if (log.isDebugEnabled()) { - log.debug("Retrieved MountPoints: {}", printMapCouples(mountPoints)); - } - List vfsSet = NamespaceDirector.getNamespace().getAllDefinedVFS(); - return new NamespaceFSAssociationCheck(mountPoints, vfsSet); - } - - /** - * Prints the couple from a Map - * - * @param map - * @return - */ - private String printMapCouples(Map map) { - - String output = ""; - for (Entry couple : map.entrySet()) { - if (output.trim().length() != 0) { - output += " ; "; - } - output += "<" + couple.getKey() + "," + couple.getValue() + ">"; - } - return output; - } - - @Override - protected List prepareSchedule() { - - return checks; - } + private static final Logger log = LoggerFactory.getLogger(SimpleCheckManager.class); + + /** + * A list of checks to be executed + */ + private List checks = Lists.newArrayList(); + + @Override + protected Logger getLogger() { + + return log; + } + + @Override + protected void loadChecks() { + + /* Add by hand a new element for each requested check */ + try { + checks.add(getNamespaceFSAssociationCheck()); + } catch (IllegalStateException e) { + log.warn("Skipping NamespaceFSAssociationCheck. " + "IllegalStateException: {}", + e.getMessage()); + } + // checks.add(new NamespaceFSExtendedAttributeDeclarationCheck()); Removed + checks.add(new NamespaceFSExtendedAttributeUsageCheck()); + checks.add(new NamespaceFSExtendedACLUsageCheck()); + } + + /** + * + */ + private Check getNamespaceFSAssociationCheck() { + + Map mountPoints; + // load mstab mount points and file system types + try { + mountPoints = MtabUtil.getFSMountPoints(); + } catch (Exception e) { + log.error("Unable to get filesystem mount points. 
Exception: {}", e.getMessage()); + throw new IllegalStateException("Unable to get filesystem mount points"); + } + if (log.isDebugEnabled()) { + log.debug("Retrieved MountPoints: {}", printMapCouples(mountPoints)); + } + List vfsSet = Namespace.getInstance().getAllDefinedVFS(); + return new NamespaceFSAssociationCheck(mountPoints, vfsSet); + } + + /** + * Prints the couple from a Map + * + * @param map + * @return + */ + private String printMapCouples(Map map) { + + String output = ""; + for (Entry couple : map.entrySet()) { + if (output.trim().length() != 0) { + output += " ; "; + } + output += "<" + couple.getKey() + "," + couple.getValue() + ">"; + } + return output; + } + + @Override + protected List prepareSchedule() { + + return checks; + } } diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java index e9b9350db..1b2d1c9c8 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java +++ b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedACLUsageCheck.java @@ -20,14 +20,10 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.model.VirtualFS; -/** - * @author Michele Dibenedetto - * - */ public class NamespaceFSExtendedACLUsageCheck implements Check { private static final Logger log = LoggerFactory.getLogger(NamespaceFSExtendedACLUsageCheck.class); @@ -60,7 +56,7 @@ public CheckResponse execute() throws GenericCheckException { } try { // load declared file systems from namespace.xml - for (VirtualFS vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { + for (VirtualFS vfs : 
Namespace.getInstance().getAllDefinedVFS()) { String fsRootPath = vfs.getRootPath().trim(); if (fsRootPath.charAt(fsRootPath.length() - 1) != File.separatorChar) { fsRootPath += File.separatorChar; diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java index 7fa8d10bc..d75712b48 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java +++ b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeDeclarationCheck.java @@ -16,12 +16,9 @@ import it.grid.storm.check.GenericCheckException; import it.grid.storm.filesystem.MtabRow; import it.grid.storm.filesystem.MtabUtil; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.VirtualFS; -/** - * @author Michele Dibenedetto - */ public class NamespaceFSExtendedAttributeDeclarationCheck implements Check { private static final Logger log = @@ -52,7 +49,7 @@ public CheckResponse execute() throws GenericCheckException { } log.debug("Retrieved Mtab : {}", rows.toString()); // load declared file systems from namespace.xml - for (VirtualFS vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { + for (VirtualFS vfs : Namespace.getInstance().getAllDefinedVFS()) { String fsTypeName = vfs.getFSType(); String fsRootPath = vfs.getRootPath(); if (fsTypeName == null || fsRootPath == null) { diff --git a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java index b607e96e0..999606ae9 100644 --- a/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java +++ 
b/src/main/java/it/grid/storm/check/sanity/filesystem/NamespaceFSExtendedAttributeUsageCheck.java @@ -18,12 +18,9 @@ import it.grid.storm.ea.ExtendedAttributes; import it.grid.storm.ea.ExtendedAttributesException; import it.grid.storm.ea.ExtendedAttributesFactory; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.VirtualFS; -/** - * @author Michele Dibenedetto - */ public class NamespaceFSExtendedAttributeUsageCheck implements Check { private static final Logger log = @@ -62,7 +59,7 @@ public CheckResponse execute() throws GenericCheckException { CheckStatus status = CheckStatus.SUCCESS; String errorMessage = ""; // load declared file systems from namespace.xml - for (VirtualFS vfs : NamespaceDirector.getNamespace().getAllDefinedVFS()) { + for (VirtualFS vfs : Namespace.getInstance().getAllDefinedVFS()) { String fsRootPath = vfs.getRootPath().trim(); if (fsRootPath.charAt(fsRootPath.length() - 1) != File.separatorChar) { fsRootPath += File.separatorChar; diff --git a/src/main/java/it/grid/storm/config/ConfigReader.java b/src/main/java/it/grid/storm/config/ConfigReader.java index 56fb2670a..076e9ce8b 100644 --- a/src/main/java/it/grid/storm/config/ConfigReader.java +++ b/src/main/java/it/grid/storm/config/ConfigReader.java @@ -8,11 +8,9 @@ import java.util.Iterator; -import org.apache.commons.configuration.CompositeConfiguration; import org.apache.commons.configuration.Configuration; import org.apache.commons.configuration.ConfigurationException; import org.apache.commons.configuration.PropertiesConfiguration; -import org.apache.commons.configuration.reloading.FileChangedReloadingStrategy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -24,24 +22,18 @@ public class ConfigReader { private String configurationPathname = ""; - public ConfigReader(String configurationPathname, int refresh) throws ConfigurationException { + public ConfigReader(String configurationPathname) 
throws ConfigurationException { checkNotNull(configurationPathname, "Null configuration pathname."); - int refreshRate = refresh < 0 ? 0 : refresh; this.configurationPathname = configurationPathname; - log.info("Configuration file {}. Refresh rate: {} seconds", configurationPathname, refreshRate); - - FileChangedReloadingStrategy strategy = new FileChangedReloadingStrategy(); - strategy.setRefreshDelay(refreshRate); - PropertiesConfiguration properties = new PropertiesConfiguration(configurationPathname); - logPropertiesConfiguration(properties); - properties.setReloadingStrategy(strategy); - this.c = new CompositeConfiguration(); - ((CompositeConfiguration) this.c).addConfiguration(properties); + log.info("Configuration file {}.", configurationPathname); + + c = new PropertiesConfiguration(configurationPathname); + logPropertiesConfiguration(c); log.info("Configuration read successfully."); } - private void logPropertiesConfiguration(PropertiesConfiguration properties) { + private void logPropertiesConfiguration(Configuration properties) { log.debug("Configuration properties: "); String key; diff --git a/src/main/java/it/grid/storm/config/ConfigurationDefaults.java b/src/main/java/it/grid/storm/config/ConfigurationDefaults.java new file mode 100644 index 000000000..052e0a378 --- /dev/null +++ b/src/main/java/it/grid/storm/config/ConfigurationDefaults.java @@ -0,0 +1,108 @@ +package it.grid.storm.config; + +public class ConfigurationDefaults { + + public static final int SRM_SERVICE_PORT = 8444; + public static final String DB_USER_NAME = "storm"; + public static final String DB_PASSWORD = "storm"; + public static final int DB_URL_PORT = 3306; + public static final String DB_URL_HOSTNAME = "localhost"; + public static final String DB_URL_PROPERTIES = "useLegacyDatetimeCode=false"; + public static final int DB_POOL_MAXWAITMILLIS = -1; + public static final boolean DB_POOL_TESTONBORROW = true; + public static final boolean DB_POOL_TESTWHILEIDLE = true; + public 
static final int STORMDB_POOL_MAXTOTAL = 500; + public static final int STORMDB_POOL_MINIDLE = 50; + public static final int STORMBEISAM_POOL_MAXTOTAL = 150; + public static final int STORMBEISAM_POOL_MINIDLE = 50; + public static final long CLEANING_INITIAL_DELAY = 10L; + public static final long CLEANING_TIME_INTERVAL = 300L; + public static final long FILE_DEFAULT_SIZE = 1000000L; + public static final long FILE_LIFETIME_DEFAULT = 3600L; + + public static final long PIN_LIFETIME_DEFAULT = 259200L; + public static final long PIN_LIFETIME_MAXIMUM = 1814400L; + public static final long TRANSIT_INITIAL_DELAY = 10L; + public static final long TRANSIT_TIME_INTERVAL = 300L; + public static final long PICKING_INITIAL_DELAY = 1L; + public static final long PICKING_TIME_INTERVAL = 2L; + public static final int PICKING_MAX_BATCH_SIZE = 100; + + public static final int XMLRPC_MAX_THREAD = 256; + public static final int XMLRPC_MAX_QUEUE_SIZE = 1000; + public static final int XMLRPC_SERVER_PORT = 8080; + + public static final int LS_MAX_NUMBER_OF_ENTRY = 500; + public static final boolean LS_ALL_LEVEL_RECURSIVE = false; + public static final int LS_NUM_OF_LEVELS = 1; + public static final int LS_OFFSET = 0; + + public static final int PTP_CORE_POOL_SIZE = 50; + public static final int PTP_MAX_POOL_SIZE = 200; + + public static final int PTP_QUEUE_SIZE = 1000; + public static final int PTG_CORE_POOL_SIZE = 50; + + public static final int PTG_MAX_POOL_SIZE = 200; + public static final int PTG_QUEUE_SIZE = 2000; + + public static final int BOL_CORE_POOL_SIZE = 50; + public static final int BOL_MAX_POOL_SIZE = 200; + public static final int BOL_QUEUE_SIZE = 2000; + + public static final int CORE_POOL_SIZE = 10; + public static final int MAX_POOL_SIZE = 50; + public static final int QUEUE_SIZE = 2000; + + public static final int GRIDFTP_TIME_OUT = 15000; + + public static final boolean AUTOMATIC_DIRECTORY_CREATION = false; + public static final String DEFAULT_OVERWRITE_MODE = "N"; 
+ public static final String DEFAULT_FILE_STORAGE_TYPE = "V"; + + public static final int PURGE_BATCH_SIZE = 800; + public static final int EXPIRED_REQUEST_TIME = 604800; + public static final int REQUEST_PURGER_DELAY = 10; + public static final int REQUEST_PURGER_PERIOD = 600; + public static final boolean EXPIRED_REQUEST_PURGING = true; + + public static final String EXTRA_SLASHES_FOR_FILE_TURL = ""; + public static final String EXTRA_SLASHES_FOR_RFIO_TURL = ""; + public static final String EXTRA_SLASHES_FOR_GSIFTP_TURL = ""; + public static final String EXTRA_SLASHES_FOR_ROOT_TURL = "/"; + + public static final String PING_VALUES_PROPERTIES_FILENAME = "ping-values.properties"; + + public static final int HEARTHBEAT_PERIOD = 60; + public static final int PERFORMANCE_GLANCE_TIME_INTERVAL = 15; + public static final int PERFORMANCE_LOGBOOK_TIME_INTERVAL = 15; + public static final boolean PERFORMANCE_MEASURING = false; + public static final boolean BOOK_KEEPING_ENABLED = false; + public static final boolean ENABLE_WRITE_PERM_ON_DIRECTORY = false; + public static final int MAX_LOOP = 10; + public static final String GRID_USER_MAPPER_CLASSNAME = "it.grid.storm.griduser.StormLcmapsJNAMapper"; + + public static final int REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS = 5; + public static final int REST_SERVICES_PORT = 9998; + public static final int REST_SERVICES_MAX_THREAD = 100; + public static final int REST_SERVICES_MAX_QUEUE_SIZE = 1000; + + public static final boolean PTG_SKIP_ACL_SETUP = false; + public static final boolean PTP_SKIP_ACL_SETUP = false; + + public static final long EXPIRED_INPROGRESS_BOL_TIME = 2592000L; + public static final long EXPIRED_INPROGRESS_PTP_TIME = 2592000L; + + public static final boolean SANITY_CHECK_ENABLED = true; + + public static final boolean XMLRPC_SECURITY_ENABLED = false; + + public static final boolean SYNCHRONOUS_QUOTA_CHECK_ENABLED = false; + public static final int GPFS_QUOTA_REFRESH_PERIOD = 900; + + public static final boolean 
DISKUSAGE_SERVICE_ENABLED = false; + + public static final boolean JAVA_NET_PREFERIPV6ADDRESSES = true; + + public static final long SERVER_POOL_STATUS_CHECK_TIMEOUT = 20000L; +} diff --git a/src/main/java/it/grid/storm/config/Configuration.java b/src/main/java/it/grid/storm/config/StormConfiguration.java similarity index 70% rename from src/main/java/it/grid/storm/config/Configuration.java rename to src/main/java/it/grid/storm/config/StormConfiguration.java index c3d284e5d..0fedc6ce1 100644 --- a/src/main/java/it/grid/storm/config/Configuration.java +++ b/src/main/java/it/grid/storm/config/StormConfiguration.java @@ -4,12 +4,93 @@ */ package it.grid.storm.config; +import static it.grid.storm.config.ConfigurationDefaults.AUTOMATIC_DIRECTORY_CREATION; +import static it.grid.storm.config.ConfigurationDefaults.BOL_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOL_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.BOOK_KEEPING_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.CLEANING_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.CLEANING_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.DB_PASSWORD; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_MAXWAITMILLIS; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TESTONBORROW; +import static it.grid.storm.config.ConfigurationDefaults.DB_POOL_TESTWHILEIDLE; +import static it.grid.storm.config.ConfigurationDefaults.DB_URL_HOSTNAME; +import static it.grid.storm.config.ConfigurationDefaults.DB_URL_PORT; +import static it.grid.storm.config.ConfigurationDefaults.DB_URL_PROPERTIES; +import static it.grid.storm.config.ConfigurationDefaults.DB_USER_NAME; +import static 
it.grid.storm.config.ConfigurationDefaults.DEFAULT_FILE_STORAGE_TYPE; +import static it.grid.storm.config.ConfigurationDefaults.DEFAULT_OVERWRITE_MODE; +import static it.grid.storm.config.ConfigurationDefaults.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.ENABLE_WRITE_PERM_ON_DIRECTORY; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_INPROGRESS_BOL_TIME; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_INPROGRESS_PTP_TIME; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_REQUEST_PURGING; +import static it.grid.storm.config.ConfigurationDefaults.EXPIRED_REQUEST_TIME; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_FILE_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_GSIFTP_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_RFIO_TURL; +import static it.grid.storm.config.ConfigurationDefaults.EXTRA_SLASHES_FOR_ROOT_TURL; +import static it.grid.storm.config.ConfigurationDefaults.FILE_DEFAULT_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.FILE_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.GPFS_QUOTA_REFRESH_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.GRIDFTP_TIME_OUT; +import static it.grid.storm.config.ConfigurationDefaults.GRID_USER_MAPPER_CLASSNAME; +import static it.grid.storm.config.ConfigurationDefaults.HEARTHBEAT_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.JAVA_NET_PREFERIPV6ADDRESSES; +import static it.grid.storm.config.ConfigurationDefaults.LS_ALL_LEVEL_RECURSIVE; +import static it.grid.storm.config.ConfigurationDefaults.LS_MAX_NUMBER_OF_ENTRY; +import static it.grid.storm.config.ConfigurationDefaults.LS_NUM_OF_LEVELS; +import static it.grid.storm.config.ConfigurationDefaults.LS_OFFSET; +import static it.grid.storm.config.ConfigurationDefaults.MAX_LOOP; +import static 
it.grid.storm.config.ConfigurationDefaults.MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_GLANCE_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_LOGBOOK_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PERFORMANCE_MEASURING; +import static it.grid.storm.config.ConfigurationDefaults.PICKING_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.PICKING_MAX_BATCH_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PICKING_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.PING_VALUES_PROPERTIES_FILENAME; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_DEFAULT; +import static it.grid.storm.config.ConfigurationDefaults.PIN_LIFETIME_MAXIMUM; +import static it.grid.storm.config.ConfigurationDefaults.PTG_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTG_SKIP_ACL_SETUP; +import static it.grid.storm.config.ConfigurationDefaults.PTP_CORE_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_MAX_POOL_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.PTP_SKIP_ACL_SETUP; +import static it.grid.storm.config.ConfigurationDefaults.PURGE_BATCH_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS; +import static it.grid.storm.config.ConfigurationDefaults.REQUEST_PURGER_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.REQUEST_PURGER_PERIOD; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_QUEUE_SIZE; +import static 
it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_MAX_THREAD; +import static it.grid.storm.config.ConfigurationDefaults.REST_SERVICES_PORT; +import static it.grid.storm.config.ConfigurationDefaults.SANITY_CHECK_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.SERVER_POOL_STATUS_CHECK_TIMEOUT; +import static it.grid.storm.config.ConfigurationDefaults.SRM_SERVICE_PORT; +import static it.grid.storm.config.ConfigurationDefaults.STORMBEISAM_POOL_MAXTOTAL; +import static it.grid.storm.config.ConfigurationDefaults.STORMBEISAM_POOL_MINIDLE; +import static it.grid.storm.config.ConfigurationDefaults.STORMDB_POOL_MAXTOTAL; +import static it.grid.storm.config.ConfigurationDefaults.STORMDB_POOL_MINIDLE; +import static it.grid.storm.config.ConfigurationDefaults.SYNCHRONOUS_QUOTA_CHECK_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.TRANSIT_INITIAL_DELAY; +import static it.grid.storm.config.ConfigurationDefaults.TRANSIT_TIME_INTERVAL; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_QUEUE_SIZE; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_MAX_THREAD; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_SECURITY_ENABLED; +import static it.grid.storm.config.ConfigurationDefaults.XMLRPC_SERVER_PORT; import static it.grid.storm.info.du.DiskUsageService.DEFAULT_INITIAL_DELAY; import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_INTERVAL; import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_PARALLEL; import static java.lang.System.getProperty; import java.io.File; +import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.ArrayList; @@ -19,12 +100,11 @@ import org.apache.commons.configuration.ConfigurationException; import org.apache.commons.lang.ArrayUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; -import it.grid.storm.rest.RestServer; -import 
it.grid.storm.xmlrpc.XMLRPCHttpServer; - /** * Singleton holding all configuration values that any other object in the StoRM backend reads from * configuration files, databases, etc. Implements a 'get' method for each value that @@ -35,32 +115,47 @@ * specified in each method comment. */ -public class Configuration { +public class StormConfiguration { public static final String DEFAULT_STORM_CONFIG_FILE = "/etc/storm/backend-server/storm.properties"; - public static final int DEFAULT_STORM_CONFIG_REFRESH_RATE = 0; + + private static Logger log = LoggerFactory.getLogger(StormConfiguration.class); private final ConfigReader cr; - private static Configuration instance; + private static StormConfiguration instance; /* System properties */ public static final String CONFIG_FILE_PATH = "storm.configuration.file"; - public static final String REFRESH_RATE = "storm.configuration.refresh"; /* Configuration file properties */ private static final String MANAGED_SURLS_KEY = "storm.service.SURL.endpoint"; private static final String MANAGED_SURL_DEFAULT_PORTS_KEY = "storm.service.SURL.default-ports"; private static final String SERVICE_HOSTNAME_KEY = "storm.service.FE-public.hostname"; private static final String SERVICE_PORT_KEY = "storm.service.port"; - private static final String LIST_OF_MACHINE_IPS_KEY = "storm.service.FE-list.IPs"; - private static final String DB_URL_HOSTNAME = "storm.service.request-db.host"; - private static final String DB_URL_PROPERTIES = "storm.service.request-db.properties"; - private static final String DB_USER_NAME_KEY = "storm.service.request-db.username"; - private static final String DB_PASSWORD_KEY = "storm.service.request-db.passwd"; - private static final String DB_RECONNECT_PERIOD_KEY = "asynch.db.ReconnectPeriod"; - private static final String DB_RECONNECT_DELAY_KEY = "asynch.db.DelayPeriod"; + private static final String DB_URL_HOSTNAME_LEGACY_KEY = "storm.service.request-db.host"; + private static final String DB_URL_PORT_LEGACY_KEY = 
"storm.service.request-db.port"; + private static final String DB_URL_PROPERTIES_LEGACY_KEY = "storm.service.request-db.properties"; + private static final String DB_USER_NAME_LEGACY_KEY = "storm.service.request-db.username"; + private static final String DB_PASSWORD_LEGACY_KEY = "storm.service.request-db.passwd"; + + private static final String DB_URL_HOSTNAME_KEY = "storm.service.db.host"; + private static final String DB_URL_PORT_KEY = "storm.service.db.port"; + private static final String DB_URL_PROPERTIES_KEY = "storm.service.db.properties"; + private static final String DB_USER_NAME_KEY = "storm.service.db.username"; + private static final String DB_PASSWORD_KEY = "storm.service.db.password"; + + private static final String DB_POOL_MAXWAITMILLIS_KEY = "storm.service.db.pool.maxWaitMillis"; + private static final String DB_POOL_TESTONBORROW_KEY = "storm.service.db.pool.testOnBorrow"; + private static final String DB_POOL_TESTWHILEIDLE_KEY = "storm.service.db.pool.testWhileIdle"; + private static final String STORMDB_POOL_MAXTOTAL_KEY = "storm.service.db.pool.stormdb.maxTotal"; + private static final String STORMDB_POOL_MINIDLE_KEY = "storm.service.db.pool.stormdb.minIdle"; + private static final String STORMBEISAM_POOL_MAXTOTAL_KEY = + "storm.service.db.pool.stormbeisam.maxTotal"; + private static final String STORMBEISAM_POOL_MINIDLE_KEY = + "storm.service.db.pool.stormbeisam.minIdle"; + private static final String CLEANING_INITIAL_DELAY_KEY = "gc.pinnedfiles.cleaning.delay"; private static final String CLEANING_TIME_INTERVAL_KEY = "gc.pinnedfiles.cleaning.interval"; private static final String FILE_DEFAULT_SIZE_KEY = "fileSize.default"; @@ -75,11 +170,6 @@ public class Configuration { private static final String XMLRPC_MAX_THREAD_KEY = "synchcall.xmlrpc.maxthread"; private static final String XMLRPC_MAX_QUEUE_SIZE_KEY = "synchcall.xmlrpc.max_queue_size"; private static final String LIST_OF_DEFAULT_SPACE_TOKEN_KEY = "storm.service.defaultSpaceTokens"; - 
private static final String COMMAND_SERVER_BINDING_PORT_KEY = "storm.commandserver.port"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY = - "persistence.internal-db.connection-pool.maxActive"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY = - "persistence.internal-db.connection-pool.maxWait"; private static final String XMLRPC_SERVER_PORT_KEY = "synchcall.xmlrpc.unsecureServerPort"; private static final String LS_MAX_NUMBER_OF_ENTRY_KEY = "synchcall.directoryManager.maxLsEntry"; private static final String LS_ALL_LEVEL_RECURSIVE_KEY = @@ -101,19 +191,14 @@ public class Configuration { private static final String CORE_POOL_SIZE_KEY = "scheduler.crusher.workerCorePoolSize"; private static final String MAX_POOL_SIZE_KEY = "scheduler.crusher.workerMaxPoolSize"; private static final String QUEUE_SIZE_KEY = "scheduler.crusher.queueSize"; - private static final String NAMESPACE_CONFIG_FILENAME_KEY = "namespace.filename"; - private static final String NAMESPACE_SCHEMA_FILENAME_KEY = "namespace.schema.filename"; - private static final String NAMESPACE_CONFIG_REFRESH_RATE_IN_SECONDS_KEY = - "namespace.refreshrate"; - private static final String NAMESPACE_AUTOMATIC_RELOADING_KEY = - "namespace.automatic-config-reload"; private static final String GRIDFTP_TIME_OUT_KEY = "asynch.srmcopy.gridftp.timeout"; private static final String AUTOMATIC_DIRECTORY_CREATION_KEY = "directory.automatic-creation"; private static final String DEFAULT_OVERWRITE_MODE_KEY = "default.overwrite"; private static final String DEFAULT_FILE_STORAGE_TYPE_KEY = "default.storagetype"; private static final String PURGE_BATCH_SIZE_KEY = "purge.size"; private static final String EXPIRED_REQUEST_TIME_KEY = "expired.request.time"; - private static final String EXPIRED_INPROGRESS_PTP_TIME_KEY = "expired.inprogress.time"; + private static final String EXPIRED_INPROGRESS_BOL_TIME_KEY = "expired.inprogress.bol.time"; + private static final String 
EXPIRED_INPROGRESS_PTP_TIME_KEY = "expired.inprogress.ptp.time"; private static final String REQUEST_PURGER_DELAY_KEY = "purge.delay"; private static final String REQUEST_PURGER_PERIOD_KEY = "purge.interval"; private static final String EXPIRED_REQUEST_PURGING_KEY = "purging"; @@ -134,62 +219,47 @@ public class Configuration { private static final String GRID_USER_MAPPER_CLASSNAME_KEY = "griduser.mapper.classname"; private static final String AUTHZ_DB_PATH_KEY = "authzdb.path"; private static final String REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS_KEY = "authzdb.refreshrate"; - private static final String RECALL_TABLE_TESTING_MODE_KEY = "tape.recalltable.service.test-mode"; private static final String REST_SERVICES_PORT_KEY = "storm.rest.services.port"; - private static final String REST_SERVICES_MAX_THREAD = "storm.rest.services.maxthread"; - private static final String REST_SERVICES_MAX_QUEUE_SIZE = "storm.rest.services.max_queue_size"; - private static final String RETRY_VALUE_KEY_KEY = "tape.recalltable.service.param.retry-value"; - private static final String STATUS_KEY_KEY = "tape.recalltable.service.param.status"; - private static final String TASKOVER_KEY_KEY = "tape.recalltable.service.param.takeover"; + private static final String REST_SERVICES_MAX_THREAD_KEY = "storm.rest.services.maxthread"; + private static final String REST_SERVICES_MAX_QUEUE_SIZE_KEY = + "storm.rest.services.max_queue_size"; private static final String STORM_PROPERTIES_VERSION_KEY = "storm.properties.version"; - private static final String TAPE_SUPPORT_ENABLED_KEY = "tape.support.enabled"; private static final String SYNCHRONOUS_QUOTA_CHECK_ENABLED_KEY = "info.quota-check.enabled"; private static final String GPFS_QUOTA_REFRESH_PERIOD_KEY = "info.quota.refresh.period"; - private static final String FAST_BOOTSTRAP_ENABLED_KEY = "bootstrap.fast.enabled"; private static final String SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY = "server-pool.status-check.timeout"; private static final String 
SANITY_CHECK_ENABLED_KEY = "sanity-check.enabled"; private static final String XMLRPC_SECURITY_ENABLED_KEY = "synchcall.xmlrpc.security.enabled"; private static final String XMLRPC_SECURITY_TOKEN_KEY = "synchcall.xmlrpc.security.token"; - private static final String PTG_SKIP_ACL_SETUP = "ptg.skip-acl-setup"; + private static final String PTG_SKIP_ACL_SETUP_KEY = "ptg.skip-acl-setup"; + private static final String PTP_SKIP_ACL_SETUP_KEY = "ptp.skip-acl-setup"; private static final String HTTP_TURL_PREFIX = "http.turl_prefix"; private static final String NETWORKADDRESS_CACHE_TTL = "networkaddress.cache.ttl"; private static final String NETWORKADDRESS_CACHE_NEGATIVE_TTL = "networkaddress.cache.negative.ttl"; - public static final String DISKUSAGE_SERVICE_ENABLED = "storm.service.du.enabled"; - private static final String DISKUSAGE_SERVICE_INITIAL_DELAY = "storm.service.du.delaySecs"; - private static final String DISKUSAGE_SERVICE_TASKS_INTERVAL = "storm.service.du.periodSecs"; - private static final String DISKUSAGE_SERVICE_TASKS_PARALLEL = "storm.service.du.parallelTasks"; + public static final String DISKUSAGE_SERVICE_ENABLED_KEY = "storm.service.du.enabled"; + private static final String DISKUSAGE_SERVICE_INITIAL_DELAY_KEY = "storm.service.du.delaySecs"; + private static final String DISKUSAGE_SERVICE_TASKS_INTERVAL_KEY = "storm.service.du.periodSecs"; + private static final String DISKUSAGE_SERVICE_TASKS_PARALLEL_KEY = + "storm.service.du.parallelTasks"; - private static final String JAVA_NET_PREFERIPV6ADDRESSES = "java.net.preferIPv6Addresses"; + private static final String JAVA_NET_PREFERIPV6ADDRESSES_KEY = "java.net.preferIPv6Addresses"; - static { - try { - instance = new Configuration(); - } catch (ConfigurationException e) { - throw new ExceptionInInitializerError(e); - } + public static void init(String filePath) throws IOException, ConfigurationException { + instance = new StormConfiguration(filePath); } - private Configuration() throws 
ConfigurationException { - - String filePath = getProperty(CONFIG_FILE_PATH, DEFAULT_STORM_CONFIG_FILE); - int refreshRate; - try { - refreshRate = Integer.valueOf(getProperty(REFRESH_RATE)); - } catch (NumberFormatException e) { - refreshRate = DEFAULT_STORM_CONFIG_REFRESH_RATE; - } - cr = new ConfigReader(filePath, refreshRate); + private StormConfiguration(String filePath) throws IOException, ConfigurationException { + cr = new ConfigReader(filePath); } /** * Returns the sole instance of the Configuration class. */ - public static Configuration getInstance() { + public static StormConfiguration getInstance() { - return Configuration.instance; + return StormConfiguration.instance; } /** @@ -234,7 +304,7 @@ public Integer[] getManagedSurlDefaultPorts() { Integer[] portsArray; if (!cr.getConfiguration().containsKey(MANAGED_SURL_DEFAULT_PORTS_KEY)) { - portsArray = new Integer[] {8444}; + portsArray = new Integer[] {ConfigurationDefaults.SRM_SERVICE_PORT}; } else { // load from external source String[] portString = cr.getConfiguration().getStringArray(MANAGED_SURL_DEFAULT_PORTS_KEY); @@ -252,127 +322,96 @@ public Integer[] getManagedSurlDefaultPorts() { */ public String getServiceHostname() { - return cr.getConfiguration().getString(SERVICE_HOSTNAME_KEY, "UNDEFINED_STORM_HOSTNAME"); + String hostname = cr.getConfiguration().getString(SERVICE_HOSTNAME_KEY); + if (hostname == null) { + log.error("Hostname not defined! Please set '{}' property", SERVICE_HOSTNAME_KEY); + throw new IllegalArgumentException(SERVICE_HOSTNAME_KEY + " not set!"); + } + return hostname; } - /** - * Method used by SFN to establish the FE binding port. If no value is found in the configuration - * medium, then the default one is used instead. 
key="storm.service.port"; default value="8444" - */ public int getServicePort() { - return cr.getConfiguration().getInt(SERVICE_PORT_KEY, 8444); + return cr.getConfiguration().getInt(SERVICE_PORT_KEY, SRM_SERVICE_PORT); } - /** - * Method used to get a List of Strings of the IPs of the machine hosting the FE for _this_ StoRM - * instance! Used in the xmlrcp server configuration, to allow request coming from the specified - * IP. (Into the xmlrpc server the filter is done by IP, not hostname.) This paramter is mandatory - * when a distribuited FE-BE installation of StoRM is used togheter with a dynamic DNS on the FE - * hostname. In that case the properties storm.machinenames is not enough meaningfull. If no value - * is found in the configuration medium, then the default value is returned instead. - * key="storm.machineIPs"; default value={"127.0.0.1"}; - */ - public List getListOfMachineIPs() { + public String getDbUsername() { - if (cr.getConfiguration().containsKey(LIST_OF_MACHINE_IPS_KEY)) { + return cr.getConfiguration() + .getString(DB_USER_NAME_KEY, + cr.getConfiguration().getString(DB_USER_NAME_LEGACY_KEY, DB_USER_NAME)); + } - String[] names = cr.getConfiguration().getString(LIST_OF_MACHINE_IPS_KEY).split(";"); // split - for (int i = 0; i < names.length; i++) { - names[i] = names[i].trim().toLowerCase(); // for each bit remove - } - return Arrays.asList(names); + public String getDbPassword() { - } else { - return Arrays.asList("127.0.0.1"); - } + return cr.getConfiguration() + .getString(DB_PASSWORD_KEY, + cr.getConfiguration().getString(DB_PASSWORD_LEGACY_KEY, DB_PASSWORD)); } - /** - * Method used by all DAO Objects to get the DataBase Driver. If no value is found in the - * configuration medium, then the default value is returned instead. 
- * key="asynch.picker.db.driver"; default value="com.mysql.cj.jdbc.Driver"; - */ - public String getDBDriver() { + public String getDbHostname() { - return "com.mysql.cj.jdbc.Driver"; + return cr.getConfiguration() + .getString(DB_URL_HOSTNAME_KEY, + cr.getConfiguration().getString(DB_URL_HOSTNAME_LEGACY_KEY, DB_URL_HOSTNAME)); + } + + public String getDbProperties() { + + return cr.getConfiguration() + .getString(DB_URL_PROPERTIES_KEY, + cr.getConfiguration().getString(DB_URL_PROPERTIES_LEGACY_KEY, DB_URL_PROPERTIES)); + } + + public int getDbPort() { + + return cr.getConfiguration() + .getInt(DB_URL_PORT_KEY, cr.getConfiguration().getInt(DB_URL_PORT_LEGACY_KEY, DB_URL_PORT)); } /** - * Method used by all DAO Objects to get DB URL. If no value is found in the configuration medium, - * then the default value is returned instead. + * Sets the MaxWaitMillis property. Use -1 to make the pool wait indefinitely. */ - public String getStormDbURL() { + public int getDbPoolMaxWaitMillis() { - String host = getDBHostname(); - String properties = getDBProperties(); - if (properties.isEmpty()) { - return "jdbc:mysql://" + host + "/storm_db"; - } - return "jdbc:mysql://" + host + "/storm_db?" + properties; + return cr.getConfiguration().getInt(DB_POOL_MAXWAITMILLIS_KEY, DB_POOL_MAXWAITMILLIS); } /** - * Method used by all DAO Objects to get the DB username. If no value is found in the - * configuration medium, then the default value is returned instead. Default value = "storm"; key - * searched in medium = "asynch.picker.db.username". + * This property determines whether or not the pool will validate objects before they are borrowed + * from the pool. */ - public String getDBUserName() { + public boolean isDbPoolTestOnBorrow() { - return cr.getConfiguration().getString(DB_USER_NAME_KEY, "storm"); + return cr.getConfiguration().getBoolean(DB_POOL_TESTONBORROW_KEY, DB_POOL_TESTONBORROW); } /** - * Method used by all DAO Objects to get the DB password. 
If no value is found in the - * configuration medium, then the default value is returned instead. Default value = "storm"; key - * searched in medium = "asynch.picker.db.passwd". + * This property determines whether or not the idle object evictor will validate connections. */ - public String getDBPassword() { + public boolean isDbPoolTestWhileIdle() { - return cr.getConfiguration().getString(DB_PASSWORD_KEY, "storm"); + return cr.getConfiguration().getBoolean(DB_POOL_TESTWHILEIDLE_KEY, DB_POOL_TESTWHILEIDLE); } - public String getDBHostname() { + public int getStormDbPoolSize() { - return cr.getConfiguration().getString(DB_URL_HOSTNAME, "localhost"); + return cr.getConfiguration().getInt(STORMDB_POOL_MAXTOTAL_KEY, STORMDB_POOL_MAXTOTAL); } - /* - * END definition of MANDATORY PROPERTIES - */ - - public String getDBProperties() { + public int getStormDbPoolMinIdle() { - return cr.getConfiguration().getString(DB_URL_PROPERTIES, "serverTimezone=UTC&autoReconnect=true"); + return cr.getConfiguration().getInt(STORMDB_POOL_MINIDLE_KEY, STORMDB_POOL_MINIDLE); } - /** - * Method used by all DAOs to establish the reconnection period in _seconds_: after such period - * the DB connection will be closed and re-opened. Beware that after such time expires, the - * connection is _not_ automatically closed and reopened; rather, it acts as a flag that is - * considered by the main code and when the most appropriate time comes, the connection is closed - * and reopened. This is because of MySQL bug that does not allow a connection to remain open for - * an arbitrary amount of time! Else an Unexpected EOF Exception gets thrown by the JDBC driver! - * If no value is found in the configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectPeriod"; default value=18000; Keep in mind that 18000 seconds = 5 - * hours. 
- */ - public long getDBReconnectPeriod() { + public int getStormBeIsamPoolSize() { - return cr.getConfiguration().getLong(DB_RECONNECT_PERIOD_KEY, 18000); + return cr.getConfiguration().getInt(STORMBEISAM_POOL_MAXTOTAL_KEY, STORMBEISAM_POOL_MAXTOTAL); } - /** - * Method used by all DAOs to establish the reconnection delay in _seconds_: when StoRM is first - * launched it will wait for this amount of time before starting the timer. This is because of - * MySQL bug that does not allow a connection to remain open for an arbitrary amount of time! Else - * an Unexpected EOF Exception gets thrown by the JDBC driver! If no value is found in the - * configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectDelay"; default value=30; - */ - public long getDBReconnectDelay() { + public int getStormBeIsamPoolMinIdle() { - return cr.getConfiguration().getLong(DB_RECONNECT_DELAY_KEY, 30); + return cr.getConfiguration().getInt(STORMBEISAM_POOL_MINIDLE_KEY, STORMBEISAM_POOL_MINIDLE); } /** @@ -382,7 +421,7 @@ public long getDBReconnectDelay() { */ public long getCleaningInitialDelay() { - return cr.getConfiguration().getLong(CLEANING_INITIAL_DELAY_KEY, 10); + return cr.getConfiguration().getLong(CLEANING_INITIAL_DELAY_KEY, CLEANING_INITIAL_DELAY); } /** @@ -393,7 +432,7 @@ public long getCleaningInitialDelay() { */ public long getCleaningTimeInterval() { - return cr.getConfiguration().getLong(CLEANING_TIME_INTERVAL_KEY, 300); + return cr.getConfiguration().getLong(CLEANING_TIME_INTERVAL_KEY, CLEANING_TIME_INTERVAL); } /** @@ -403,7 +442,7 @@ public long getCleaningTimeInterval() { */ public long getFileDefaultSize() { - return cr.getConfiguration().getLong(FILE_DEFAULT_SIZE_KEY, 1000000); + return cr.getConfiguration().getLong(FILE_DEFAULT_SIZE_KEY, FILE_DEFAULT_SIZE); } /** @@ -414,7 +453,7 @@ public long getFileDefaultSize() { */ public long getFileLifetimeDefault() { - return cr.getConfiguration().getLong(FILE_LIFETIME_DEFAULT_KEY, 3600); + 
return cr.getConfiguration().getLong(FILE_LIFETIME_DEFAULT_KEY, FILE_LIFETIME_DEFAULT); } /** @@ -426,7 +465,7 @@ public long getFileLifetimeDefault() { */ public long getPinLifetimeDefault() { - return cr.getConfiguration().getLong(PIN_LIFETIME_DEFAULT_KEY, 259200); + return cr.getConfiguration().getLong(PIN_LIFETIME_DEFAULT_KEY, PIN_LIFETIME_DEFAULT); } /** @@ -437,7 +476,7 @@ public long getPinLifetimeDefault() { */ public long getPinLifetimeMaximum() { - return cr.getConfiguration().getLong(PIN_LIFETIME_MAXIMUM_KEY, 1814400); + return cr.getConfiguration().getLong(PIN_LIFETIME_MAXIMUM_KEY, PIN_LIFETIME_MAXIMUM); } /** @@ -447,7 +486,7 @@ public long getPinLifetimeMaximum() { */ public long getTransitInitialDelay() { - return cr.getConfiguration().getLong(TRANSIT_INITIAL_DELAY_KEY, 10); + return cr.getConfiguration().getLong(TRANSIT_INITIAL_DELAY_KEY, TRANSIT_INITIAL_DELAY); } /** @@ -457,7 +496,7 @@ public long getTransitInitialDelay() { */ public long getTransitTimeInterval() { - return cr.getConfiguration().getLong(TRANSIT_TIME_INTERVAL_KEY, 300); + return cr.getConfiguration().getLong(TRANSIT_TIME_INTERVAL_KEY, TRANSIT_TIME_INTERVAL); } /** @@ -467,7 +506,7 @@ public long getTransitTimeInterval() { */ public long getPickingInitialDelay() { - return cr.getConfiguration().getLong(PICKING_INITIAL_DELAY_KEY, 1); + return cr.getConfiguration().getLong(PICKING_INITIAL_DELAY_KEY, PICKING_INITIAL_DELAY); } /** @@ -477,7 +516,7 @@ public long getPickingInitialDelay() { */ public long getPickingTimeInterval() { - return cr.getConfiguration().getLong(PICKING_TIME_INTERVAL_KEY, 2); + return cr.getConfiguration().getLong(PICKING_TIME_INTERVAL_KEY, PICKING_TIME_INTERVAL); } /** @@ -487,7 +526,7 @@ public long getPickingTimeInterval() { */ public int getPickingMaxBatchSize() { - return cr.getConfiguration().getInt(PICKING_MAX_BATCH_SIZE_KEY, 100); + return cr.getConfiguration().getInt(PICKING_MAX_BATCH_SIZE_KEY, PICKING_MAX_BATCH_SIZE); } /** @@ -495,14 +534,14 @@ public 
int getPickingMaxBatchSize() { */ public int getXMLRPCMaxThread() { - return cr.getConfiguration() - .getInt(XMLRPC_MAX_THREAD_KEY, XMLRPCHttpServer.DEFAULT_MAX_THREAD_NUM); + int res = cr.getConfiguration().getInt(XMLRPC_MAX_THREAD_KEY, XMLRPC_MAX_THREAD); + return res <= 0 ? XMLRPC_MAX_THREAD : res; } public int getXMLRPCMaxQueueSize() { - return cr.getConfiguration() - .getInt(XMLRPC_MAX_QUEUE_SIZE_KEY, XMLRPCHttpServer.DEFAULT_MAX_QUEUE_SIZE); + int res = cr.getConfiguration().getInt(XMLRPC_MAX_QUEUE_SIZE_KEY, XMLRPC_MAX_QUEUE_SIZE); + return res <= 0 ? XMLRPC_MAX_QUEUE_SIZE : res; } /** @@ -522,41 +561,6 @@ public List getListOfDefaultSpaceToken() { return Lists.newArrayList(); } - /** - * Method used by StoRMCommandServer to establish the listening port to which it should bind. If - * no value is found in the configuration medium, then the default value is returned instead. - * key="storm.commandserver.port"; default value=4444; - */ - public int getCommandServerBindingPort() { - - return cr.getConfiguration().getInt(COMMAND_SERVER_BINDING_PORT_KEY, 4444); - } - - /** - * Method used in Persistence Component it returns an int indicating the maximum number of active - * connections in the connection pool. It is the maximum number of active connections that can be - * allocated from this pool at the same time... 0 (zero) for no limit. If no value is found in the - * configuration medium, then the default value is returned instead. - * key="persistence.db.pool.maxActive"; default value=10; - */ - public int getBEPersistencePoolDBMaxActive() { - - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY, 10); - } - - /** - * Method used in Persistence Component it returns an int indicating the maximum waiting time in - * _milliseconds_ for the connection in the pool. It represents the time that the pool will wait - * (when there are no available connections) for a connection to be returned before throwing an - * exception... 
a value of -1 to wait indefinitely. If no value is found in the configuration - * medium, then the default value is returned instead. key="persistence.db.pool.maxWait"; default - * value=50; - */ - public int getBEPersistencePoolDBMaxWait() { - - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY, 50); - } - /** * Method used by the Synch Component to set the binding port for the _unsecure_ xmlrpc server in * the BE. If no value is found in the configuration medium, then the default value is returned @@ -564,7 +568,7 @@ public int getBEPersistencePoolDBMaxWait() { */ public int getXmlRpcServerPort() { - return cr.getConfiguration().getInt(XMLRPC_SERVER_PORT_KEY, 8080); + return cr.getConfiguration().getInt(XMLRPC_SERVER_PORT_KEY, XMLRPC_SERVER_PORT); } /** @@ -576,7 +580,7 @@ public int getXmlRpcServerPort() { */ public int getLSMaxNumberOfEntry() { - return cr.getConfiguration().getInt(LS_MAX_NUMBER_OF_ENTRY_KEY, 500); + return cr.getConfiguration().getInt(LS_MAX_NUMBER_OF_ENTRY_KEY, LS_MAX_NUMBER_OF_ENTRY); } /** @@ -586,7 +590,7 @@ public int getLSMaxNumberOfEntry() { */ public boolean getLSallLevelRecursive() { - return cr.getConfiguration().getBoolean(LS_ALL_LEVEL_RECURSIVE_KEY, false); + return cr.getConfiguration().getBoolean(LS_ALL_LEVEL_RECURSIVE_KEY, LS_ALL_LEVEL_RECURSIVE); } /** @@ -596,7 +600,7 @@ public boolean getLSallLevelRecursive() { */ public int getLSnumOfLevels() { - return cr.getConfiguration().getInt(LS_NUM_OF_LEVELS_KEY, 1); + return cr.getConfiguration().getInt(LS_NUM_OF_LEVELS_KEY, LS_NUM_OF_LEVELS); } /** @@ -606,7 +610,7 @@ public int getLSnumOfLevels() { */ public int getLSoffset() { - return cr.getConfiguration().getInt(LS_OFFSET_KEY, 0); + return cr.getConfiguration().getInt(LS_OFFSET_KEY, LS_OFFSET); } /** @@ -624,7 +628,7 @@ public int getLSoffset() { */ public int getPtPCorePoolSize() { - return cr.getConfiguration().getInt(PTP_CORE_POOL_SIZE_KEY, 50); + return 
cr.getConfiguration().getInt(PTP_CORE_POOL_SIZE_KEY, PTP_CORE_POOL_SIZE); } /** @@ -642,7 +646,7 @@ public int getPtPCorePoolSize() { */ public int getPtPMaxPoolSize() { - return cr.getConfiguration().getInt(PTP_MAX_POOL_SIZE_KEY, 200); + return cr.getConfiguration().getInt(PTP_MAX_POOL_SIZE_KEY, PTP_MAX_POOL_SIZE); } /** @@ -660,7 +664,7 @@ public int getPtPMaxPoolSize() { */ public int getPtPQueueSize() { - return cr.getConfiguration().getInt(PTP_QUEUE_SIZE_KEY, 1000); + return cr.getConfiguration().getInt(PTP_QUEUE_SIZE_KEY, PTP_QUEUE_SIZE); } /** @@ -678,7 +682,7 @@ public int getPtPQueueSize() { */ public int getPtGCorePoolSize() { - return cr.getConfiguration().getInt(PTG_CORE_POOL_SIZE_KEY, 50); + return cr.getConfiguration().getInt(PTG_CORE_POOL_SIZE_KEY, PTG_CORE_POOL_SIZE); } /** @@ -696,7 +700,7 @@ public int getPtGCorePoolSize() { */ public int getPtGMaxPoolSize() { - return cr.getConfiguration().getInt(PTG_MAX_POOL_SIZE_KEY, 200); + return cr.getConfiguration().getInt(PTG_MAX_POOL_SIZE_KEY, PTG_MAX_POOL_SIZE); } /** @@ -714,7 +718,7 @@ public int getPtGMaxPoolSize() { */ public int getPtGQueueSize() { - return cr.getConfiguration().getInt(PTG_QUEUE_SIZE_KEY, 2000); + return cr.getConfiguration().getInt(PTG_QUEUE_SIZE_KEY, PTG_QUEUE_SIZE); } /** @@ -731,7 +735,7 @@ public int getPtGQueueSize() { */ public int getBoLCorePoolSize() { - return cr.getConfiguration().getInt(BOL_CORE_POOL_SIZE_KEY, 50); + return cr.getConfiguration().getInt(BOL_CORE_POOL_SIZE_KEY, BOL_CORE_POOL_SIZE); } /** @@ -748,7 +752,7 @@ public int getBoLCorePoolSize() { */ public int getBoLMaxPoolSize() { - return cr.getConfiguration().getInt(BOL_MAX_POOL_SIZE_KEY, 200); + return cr.getConfiguration().getInt(BOL_MAX_POOL_SIZE_KEY, BOL_MAX_POOL_SIZE); } /** @@ -766,7 +770,7 @@ public int getBoLMaxPoolSize() { */ public int getBoLQueueSize() { - return cr.getConfiguration().getInt(BOL_QUEUE_SIZE_KEY, 2000); + return cr.getConfiguration().getInt(BOL_QUEUE_SIZE_KEY, BOL_QUEUE_SIZE); } /** 
@@ -783,7 +787,7 @@ public int getBoLQueueSize() { */ public int getCorePoolSize() { - return cr.getConfiguration().getInt(CORE_POOL_SIZE_KEY, 10); + return cr.getConfiguration().getInt(CORE_POOL_SIZE_KEY, CORE_POOL_SIZE); } /** @@ -800,7 +804,7 @@ public int getCorePoolSize() { */ public int getMaxPoolSize() { - return cr.getConfiguration().getInt(MAX_POOL_SIZE_KEY, 50); + return cr.getConfiguration().getInt(MAX_POOL_SIZE_KEY, MAX_POOL_SIZE); } /** @@ -817,7 +821,7 @@ public int getMaxPoolSize() { */ public int getQueueSize() { - return cr.getConfiguration().getInt(QUEUE_SIZE_KEY, 2000); + return cr.getConfiguration().getInt(QUEUE_SIZE_KEY, QUEUE_SIZE); } /** @@ -827,7 +831,7 @@ public int getQueueSize() { */ public String getNamespaceConfigFilename() { - return cr.getConfiguration().getString(NAMESPACE_CONFIG_FILENAME_KEY, "namespace.xml"); + return "namespace.xml"; } /** @@ -837,25 +841,7 @@ public String getNamespaceConfigFilename() { */ public String getNamespaceSchemaFilename() { - return cr.getConfiguration().getString(NAMESPACE_SCHEMA_FILENAME_KEY, "Schema UNKNOWN!"); - } - - public int getNamespaceConfigRefreshRateInSeconds() { - - return cr.getConfiguration().getInt(NAMESPACE_CONFIG_REFRESH_RATE_IN_SECONDS_KEY, 3); - } - - /** - * getNamespaceAutomaticReloading - * - * @return boolean Method used by Namespace Configuration Reloading Strategy (Peeper). If "peeper" - * found namespace.xml config file changed it checks if it can perform an automatic - * reload. If no value is found in the configuration medium, then the default one is used - * instead. 
key="namespace.automatic-config-reload"; default value=false - */ - public boolean getNamespaceAutomaticReloading() { - - return cr.getConfiguration().getBoolean(NAMESPACE_AUTOMATIC_RELOADING_KEY, false); + return "namespace-1.5.0.xsd"; } /** @@ -865,7 +851,7 @@ public boolean getNamespaceAutomaticReloading() { */ public int getGridFTPTimeOut() { - return cr.getConfiguration().getInt(GRIDFTP_TIME_OUT_KEY, 15000); + return cr.getConfiguration().getInt(GRIDFTP_TIME_OUT_KEY, GRIDFTP_TIME_OUT); } /** @@ -876,7 +862,8 @@ public int getGridFTPTimeOut() { */ public boolean getAutomaticDirectoryCreation() { - return cr.getConfiguration().getBoolean(AUTOMATIC_DIRECTORY_CREATION_KEY, false); + return cr.getConfiguration() + .getBoolean(AUTOMATIC_DIRECTORY_CREATION_KEY, AUTOMATIC_DIRECTORY_CREATION); } /** @@ -886,7 +873,7 @@ public boolean getAutomaticDirectoryCreation() { */ public String getDefaultOverwriteMode() { - return cr.getConfiguration().getString(DEFAULT_OVERWRITE_MODE_KEY, "N"); + return cr.getConfiguration().getString(DEFAULT_OVERWRITE_MODE_KEY, DEFAULT_OVERWRITE_MODE); } /** @@ -896,7 +883,8 @@ public String getDefaultOverwriteMode() { */ public String getDefaultFileStorageType() { - return cr.getConfiguration().getString(DEFAULT_FILE_STORAGE_TYPE_KEY, "V"); + return cr.getConfiguration() + .getString(DEFAULT_FILE_STORAGE_TYPE_KEY, DEFAULT_FILE_STORAGE_TYPE); } /** @@ -906,7 +894,7 @@ public String getDefaultFileStorageType() { */ public int getPurgeBatchSize() { - return cr.getConfiguration().getInt(PURGE_BATCH_SIZE_KEY, 800); + return cr.getConfiguration().getInt(PURGE_BATCH_SIZE_KEY, PURGE_BATCH_SIZE); } /** @@ -918,7 +906,7 @@ public int getPurgeBatchSize() { */ public long getExpiredRequestTime() { - return cr.getConfiguration().getInt(EXPIRED_REQUEST_TIME_KEY, 604800); + return cr.getConfiguration().getInt(EXPIRED_REQUEST_TIME_KEY, EXPIRED_REQUEST_TIME); } /** @@ -928,7 +916,7 @@ public long getExpiredRequestTime() { */ public int getRequestPurgerDelay() 
{ - return cr.getConfiguration().getInt(REQUEST_PURGER_DELAY_KEY, 10); + return cr.getConfiguration().getInt(REQUEST_PURGER_DELAY_KEY, REQUEST_PURGER_DELAY); } /** @@ -938,7 +926,7 @@ public int getRequestPurgerDelay() { */ public int getRequestPurgerPeriod() { - return cr.getConfiguration().getInt(REQUEST_PURGER_PERIOD_KEY, 600); + return cr.getConfiguration().getInt(REQUEST_PURGER_PERIOD_KEY, REQUEST_PURGER_PERIOD); } /** @@ -948,7 +936,7 @@ public int getRequestPurgerPeriod() { */ public boolean getExpiredRequestPurging() { - return cr.getConfiguration().getBoolean(EXPIRED_REQUEST_PURGING_KEY, true); + return cr.getConfiguration().getBoolean(EXPIRED_REQUEST_PURGING_KEY, EXPIRED_REQUEST_PURGING); } /** @@ -958,7 +946,8 @@ public boolean getExpiredRequestPurging() { */ public String getExtraSlashesForFileTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_FILE_TURL_KEY, ""); + return cr.getConfiguration() + .getString(EXTRA_SLASHES_FOR_FILE_TURL_KEY, EXTRA_SLASHES_FOR_FILE_TURL); } /** @@ -969,7 +958,8 @@ public String getExtraSlashesForFileTURL() { */ public String getExtraSlashesForRFIOTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_RFIO_TURL_KEY, ""); + return cr.getConfiguration() + .getString(EXTRA_SLASHES_FOR_RFIO_TURL_KEY, EXTRA_SLASHES_FOR_RFIO_TURL); } /** @@ -980,7 +970,8 @@ public String getExtraSlashesForRFIOTURL() { */ public String getExtraSlashesForGsiFTPTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY, ""); + return cr.getConfiguration() + .getString(EXTRA_SLASHES_FOR_GSIFTP_TURL_KEY, EXTRA_SLASHES_FOR_GSIFTP_TURL); } /** @@ -991,7 +982,8 @@ public String getExtraSlashesForGsiFTPTURL() { */ public String getExtraSlashesForROOTTURL() { - return cr.getConfiguration().getString(EXTRA_SLASHES_FOR_ROOT_TURL_KEY, "/"); + return cr.getConfiguration() + .getString(EXTRA_SLASHES_FOR_ROOT_TURL_KEY, EXTRA_SLASHES_FOR_ROOT_TURL); } /** @@ -1003,8 +995,8 @@ public String 
getExtraSlashesForROOTTURL() { */ public String getPingValuesPropertiesFilename() { - final String KEY = "ping-values.properties"; - return cr.getConfiguration().getString(PING_VALUES_PROPERTIES_FILENAME_KEY, KEY); + return cr.getConfiguration() + .getString(PING_VALUES_PROPERTIES_FILENAME_KEY, PING_VALUES_PROPERTIES_FILENAME); } /** @@ -1013,53 +1005,55 @@ public String getPingValuesPropertiesFilename() { */ public int getHearthbeatPeriod() { - return cr.getConfiguration().getInt(HEARTHBEAT_PERIOD_KEY, 60); + return cr.getConfiguration().getInt(HEARTHBEAT_PERIOD_KEY, HEARTHBEAT_PERIOD); } /** - * getPerformanceGlancePeriod + * getHearthbeatPerformanceGlanceTimeInterval * * @return int If no value is found in the configuration medium, then the default one is used * instead. key="health.performance.glance.timeInterval"; default value=15 (15 sec) */ - public int getPerformanceGlanceTimeInterval() { + public int getHearthbeatPerformanceGlanceTimeInterval() { - return cr.getConfiguration().getInt(PERFORMANCE_GLANCE_TIME_INTERVAL_KEY, 15); + return cr.getConfiguration() + .getInt(PERFORMANCE_GLANCE_TIME_INTERVAL_KEY, PERFORMANCE_GLANCE_TIME_INTERVAL); } /** - * getPerformanceGlancePeriod + * getHearthbeatPerformanceGlancePeriod * * @return int If no value is found in the configuration medium, then the default one is used * instead. key="health.performance.logbook.timeInterval"; default value=15 (15 sec) */ - public int getPerformanceLogbookTimeInterval() { + public int getHearthbeatPerformanceLogbookTimeInterval() { - return cr.getConfiguration().getInt(PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY, 15); + return cr.getConfiguration() + .getInt(PERFORMANCE_LOGBOOK_TIME_INTERVAL_KEY, PERFORMANCE_LOGBOOK_TIME_INTERVAL); } /** - * getPerformanceMeasuring + * isHearthbeatPerformanceMeasuringEnabled * * @return boolean If no value is found in the configuration medium, then the default one is used * instead. 
key="health.performance.mesauring.enabled"; default value=false */ - public boolean getPerformanceMeasuring() { + public boolean isHearthbeatPerformanceMeasuringEnabled() { - return cr.getConfiguration().getBoolean(PERFORMANCE_MEASURING_KEY, false); + return cr.getConfiguration().getBoolean(PERFORMANCE_MEASURING_KEY, PERFORMANCE_MEASURING); } /** - * getBookKeppeingEnabled + * isHearthbeatBookkeepingEnabled * * @return boolean Method used by Namespace Configuration Reloading Strategy (Peeper). If "peeper" * found namespace.xml config file changed it checks if it can perform an automatic * reload. If no value is found in the configuration medium, then the default one is used * instead. key="health.bookkeeping.enabled"; default value=false */ - public boolean getBookKeepingEnabled() { + public boolean isHearthbeatBookkeepingEnabled() { - return cr.getConfiguration().getBoolean(BOOK_KEEPING_ENABLED_KEY, false); + return cr.getConfiguration().getBoolean(BOOK_KEEPING_ENABLED_KEY, BOOK_KEEPING_ENABLED); } /** @@ -1069,12 +1063,13 @@ public boolean getBookKeepingEnabled() { */ public boolean getEnableWritePermOnDirectory() { - return cr.getConfiguration().getBoolean(ENABLE_WRITE_PERM_ON_DIRECTORY_KEY, false); + return cr.getConfiguration() + .getBoolean(ENABLE_WRITE_PERM_ON_DIRECTORY_KEY, ENABLE_WRITE_PERM_ON_DIRECTORY); } public int getMaxLoop() { - return cr.getConfiguration().getInt(MAX_LOOP_KEY, 10); + return cr.getConfiguration().getInt(MAX_LOOP_KEY, MAX_LOOP); } /** @@ -1084,8 +1079,8 @@ public int getMaxLoop() { */ public String getGridUserMapperClassname() { - final String CLASSNAME = "it.grid.storm.griduser.StormLcmapsJNAMapper"; - return cr.getConfiguration().getString(GRID_USER_MAPPER_CLASSNAME_KEY, CLASSNAME); + return cr.getConfiguration() + .getString(GRID_USER_MAPPER_CLASSNAME_KEY, GRID_USER_MAPPER_CLASSNAME); } /** @@ -1105,12 +1100,8 @@ public String getAuthzDBPath() { */ public int getRefreshRateAuthzDBfilesInSeconds() { - return 
cr.getConfiguration().getInt(REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS_KEY, 5); - } - - public boolean getRecallTableTestingMode() { - - return cr.getConfiguration().getBoolean(RECALL_TABLE_TESTING_MODE_KEY, false); + return cr.getConfiguration() + .getInt(REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS_KEY, REFRESH_RATE_AUTHZDB_FILES_IN_SECONDS); } /** @@ -1120,17 +1111,18 @@ public boolean getRecallTableTestingMode() { */ public int getRestServicesPort() { - return cr.getConfiguration().getInt(REST_SERVICES_PORT_KEY, 9998); + return cr.getConfiguration().getInt(REST_SERVICES_PORT_KEY, REST_SERVICES_PORT); } public int getRestServicesMaxThreads() { - return cr.getConfiguration().getInt(REST_SERVICES_MAX_THREAD, RestServer.DEFAULT_MAX_THREAD_NUM); + return cr.getConfiguration().getInt(REST_SERVICES_MAX_THREAD_KEY, REST_SERVICES_MAX_THREAD); } public int getRestServicesMaxQueueSize() { - return cr.getConfiguration().getInt(REST_SERVICES_MAX_QUEUE_SIZE, RestServer.DEFAULT_MAX_QUEUE_SIZE); + return cr.getConfiguration() + .getInt(REST_SERVICES_MAX_QUEUE_SIZE_KEY, REST_SERVICES_MAX_QUEUE_SIZE); } /** @@ -1139,7 +1131,7 @@ public int getRestServicesMaxQueueSize() { */ public String getRetryValueKey() { - return cr.getConfiguration().getString(RETRY_VALUE_KEY_KEY, "retry-value"); + return "retry-value"; } /** @@ -1148,7 +1140,7 @@ public String getRetryValueKey() { */ public String getStatusKey() { - return cr.getConfiguration().getString(STATUS_KEY_KEY, "status"); + return "status"; } /** @@ -1157,7 +1149,7 @@ public String getStatusKey() { */ public String getTaskoverKey() { - return cr.getConfiguration().getString(TASKOVER_KEY_KEY, "first"); + return "first"; } public String getStoRMPropertiesVersion() { @@ -1165,22 +1157,13 @@ public String getStoRMPropertiesVersion() { return cr.getConfiguration().getString(STORM_PROPERTIES_VERSION_KEY, "No version specified"); } - /** - * Flag to support or not the TAPE integration. Default value is false. 
- * - * @return - */ - public boolean getTapeSupportEnabled() { - - return cr.getConfiguration().getBoolean(TAPE_SUPPORT_ENABLED_KEY, false); - } - /** * @return */ public boolean getSynchronousQuotaCheckEnabled() { - return cr.getConfiguration().getBoolean(SYNCHRONOUS_QUOTA_CHECK_ENABLED_KEY, false); + return cr.getConfiguration() + .getBoolean(SYNCHRONOUS_QUOTA_CHECK_ENABLED_KEY, SYNCHRONOUS_QUOTA_CHECK_ENABLED); } /** @@ -1189,15 +1172,7 @@ public boolean getSynchronousQuotaCheckEnabled() { */ public int getGPFSQuotaRefreshPeriod() { - return cr.getConfiguration().getInt(GPFS_QUOTA_REFRESH_PERIOD_KEY, 900); - } - - /** - * @return - */ - public boolean getFastBootstrapEnabled() { - - return cr.getConfiguration().getBoolean(FAST_BOOTSTRAP_ENABLED_KEY, true); + return cr.getConfiguration().getInt(GPFS_QUOTA_REFRESH_PERIOD_KEY, GPFS_QUOTA_REFRESH_PERIOD); } /** @@ -1205,17 +1180,18 @@ public boolean getFastBootstrapEnabled() { */ public Long getServerPoolStatusCheckTimeout() { - return cr.getConfiguration().getLong(SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY, 20000); + return cr.getConfiguration() + .getLong(SERVER_POOL_STATUS_CHECK_TIMEOUT_KEY, SERVER_POOL_STATUS_CHECK_TIMEOUT); } public boolean getSanityCheckEnabled() { - return cr.getConfiguration().getBoolean(SANITY_CHECK_ENABLED_KEY, true); + return cr.getConfiguration().getBoolean(SANITY_CHECK_ENABLED_KEY, SANITY_CHECK_ENABLED); } public Boolean getXmlRpcTokenEnabled() { - return cr.getConfiguration().getBoolean(XMLRPC_SECURITY_ENABLED_KEY, false); + return cr.getConfiguration().getBoolean(XMLRPC_SECURITY_ENABLED_KEY, XMLRPC_SECURITY_ENABLED); } public String getXmlRpcToken() { @@ -1225,7 +1201,12 @@ public String getXmlRpcToken() { public Boolean getPTGSkipACLSetup() { - return cr.getConfiguration().getBoolean(PTG_SKIP_ACL_SETUP, false); + return cr.getConfiguration().getBoolean(PTG_SKIP_ACL_SETUP_KEY, PTG_SKIP_ACL_SETUP); + } + + public Boolean getPTPSkipACLSetup() { + + return 
cr.getConfiguration().getBoolean(PTP_SKIP_ACL_SETUP_KEY, PTP_SKIP_ACL_SETUP); } @Override @@ -1234,10 +1215,10 @@ public String toString() { StringBuilder configurationStringBuilder = new StringBuilder(); try { // This class methods - Method[] methods = Configuration.instance.getClass().getDeclaredMethods(); + Method[] methods = StormConfiguration.instance.getClass().getDeclaredMethods(); // This class fields - Field[] fields = Configuration.instance.getClass().getDeclaredFields(); + Field[] fields = StormConfiguration.instance.getClass().getDeclaredFields(); HashMap methodKeyMap = new HashMap<>(); for (Field field : fields) { String fieldName = field.getName(); @@ -1250,9 +1231,10 @@ public String toString() { + fieldName.substring(0, fieldName.lastIndexOf('_')).replace("_", "").toLowerCase(); if (methodKeyMap.containsKey(mapKey)) { String value = methodKeyMap.get(mapKey); - methodKeyMap.put(mapKey, value + " , " + (String) field.get(Configuration.instance)); + methodKeyMap.put(mapKey, + value + " , " + (String) field.get(StormConfiguration.instance)); } else { - methodKeyMap.put(mapKey, (String) field.get(Configuration.instance)); + methodKeyMap.put(mapKey, (String) field.get(StormConfiguration.instance)); } } } @@ -1266,7 +1248,7 @@ public String toString() { */ if (method.getName().substring(0, 3).equals("get") && (!method.getName().equals("getInstance")) && method.getModifiers() == 1) { - field = method.invoke(Configuration.instance, dummyArray); + field = method.invoke(StormConfiguration.instance, dummyArray); if (field.getClass().isArray()) { field = ArrayUtils.toString(field); } @@ -1301,7 +1283,13 @@ public String getHTTPTURLPrefix() { } public long getInProgressPutRequestExpirationTime() { - return cr.getConfiguration().getLong(EXPIRED_INPROGRESS_PTP_TIME_KEY, 2592000L); + return cr.getConfiguration() + .getLong(EXPIRED_INPROGRESS_PTP_TIME_KEY, EXPIRED_INPROGRESS_PTP_TIME); + } + + public long getInProgressBolRequestExpirationTime() { + return 
cr.getConfiguration() + .getLong(EXPIRED_INPROGRESS_BOL_TIME_KEY, EXPIRED_INPROGRESS_BOL_TIME); } public int getNetworkAddressCacheTtl() { @@ -1314,27 +1302,31 @@ public int getNetworkAddressCacheNegativeTtl() { public boolean getDiskUsageServiceEnabled() { - return cr.getConfiguration().getBoolean(DISKUSAGE_SERVICE_ENABLED, false); + return cr.getConfiguration() + .getBoolean(DISKUSAGE_SERVICE_ENABLED_KEY, DISKUSAGE_SERVICE_ENABLED); } public int getDiskUsageServiceInitialDelay() { - return cr.getConfiguration().getInt(DISKUSAGE_SERVICE_INITIAL_DELAY, DEFAULT_INITIAL_DELAY); + return cr.getConfiguration().getInt(DISKUSAGE_SERVICE_INITIAL_DELAY_KEY, DEFAULT_INITIAL_DELAY); } public int getDiskUsageServiceTasksInterval() { // default: 604800 s => 1 week - return cr.getConfiguration().getInt(DISKUSAGE_SERVICE_TASKS_INTERVAL, DEFAULT_TASKS_INTERVAL); + return cr.getConfiguration() + .getInt(DISKUSAGE_SERVICE_TASKS_INTERVAL_KEY, DEFAULT_TASKS_INTERVAL); } public boolean getDiskUsageServiceTasksParallel() { - return cr.getConfiguration().getBoolean(DISKUSAGE_SERVICE_TASKS_PARALLEL, DEFAULT_TASKS_PARALLEL); + return cr.getConfiguration() + .getBoolean(DISKUSAGE_SERVICE_TASKS_PARALLEL_KEY, DEFAULT_TASKS_PARALLEL); } public boolean getPreferIPv6Addresses() { - return cr.getConfiguration().getBoolean(JAVA_NET_PREFERIPV6ADDRESSES, true); + return cr.getConfiguration() + .getBoolean(JAVA_NET_PREFERIPV6ADDRESSES_KEY, JAVA_NET_PREFERIPV6ADDRESSES); } } diff --git a/src/main/java/it/grid/storm/griduser/GridUserManager.java b/src/main/java/it/grid/storm/griduser/GridUserManager.java index 679dcce3e..8b3ae1f17 100644 --- a/src/main/java/it/grid/storm/griduser/GridUserManager.java +++ b/src/main/java/it/grid/storm/griduser/GridUserManager.java @@ -15,7 +15,7 @@ package it.grid.storm.griduser; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import java.util.Map; @@ -25,7 +25,7 @@ public class GridUserManager { static final Logger log = 
LoggerFactory.getLogger(GridUserManager.class); - static Configuration config = Configuration.getInstance(); + static StormConfiguration config = StormConfiguration.getInstance(); static GridUserFactory userFactory = null; static { diff --git a/src/main/java/it/grid/storm/health/BookKeeper.java b/src/main/java/it/grid/storm/health/BookKeeper.java index f41bddb57..153a0c416 100644 --- a/src/main/java/it/grid/storm/health/BookKeeper.java +++ b/src/main/java/it/grid/storm/health/BookKeeper.java @@ -4,37 +4,37 @@ */ package it.grid.storm.health; -import java.util.ArrayList; +import java.util.List; import org.slf4j.Logger; +import com.google.common.collect.Lists; + public abstract class BookKeeper { - protected Logger bookKeepingLog = HealthDirector.getBookKeepingLogger(); - protected Logger performanceLog = HealthDirector.getPerformanceLogger(); + protected Logger bookKeepingLog = HealthDirector.getBookKeepingLogger(); + protected Logger performanceLog = HealthDirector.getPerformanceLogger(); - protected ArrayList logbook = new ArrayList(); + protected List logbook = Lists.newArrayList(); - public abstract void addLogEvent(LogEvent logEvent); + public abstract void addLogEvent(LogEvent logEvent); - public synchronized void cleanLogBook() { - logbook.clear(); - } + public synchronized void cleanLogBook() { + logbook.clear(); + } - protected void logDebug(String msg) { + protected void logDebug(String msg) { - if ((HealthDirector.isBookKeepingConfigured()) - && (HealthDirector.isBookKeepingEnabled())) { - bookKeepingLog.debug("BK: {}", msg); - } - } + if (HealthDirector.isBookKeepingEnabled()) { + bookKeepingLog.debug("BK: {}", msg); + } + } - protected void logInfo(String msg) { + protected void logInfo(String msg) { - if ((HealthDirector.isBookKeepingConfigured()) - && (HealthDirector.isBookKeepingEnabled())) { - bookKeepingLog.info(msg); - } - } + if (HealthDirector.isBookKeepingEnabled()) { + bookKeepingLog.info(msg); + } + } } diff --git 
a/src/main/java/it/grid/storm/health/DetectiveGlance.java b/src/main/java/it/grid/storm/health/DetectiveGlance.java index 15d340c20..7b255bd84 100644 --- a/src/main/java/it/grid/storm/health/DetectiveGlance.java +++ b/src/main/java/it/grid/storm/health/DetectiveGlance.java @@ -78,8 +78,7 @@ public StoRMStatus haveaLook() { stormStatus.setHeapFreeSize(getHeapFreeSize()); stormStatus.setMAXHeapSize(getHeapMaxSize()); stormStatus.setHeapSize(getHeapSize()); - SimpleBookKeeper bk = HealthDirector.getHealthMonitor() - .getSimpleBookKeeper(); + SimpleBookKeeper bk = HealthMonitor.getInstance().getSimpleBookKeeper(); if (bk != null) { int ptgReq = bk.getNumberOfRequest(OperationType.PTG); diff --git a/src/main/java/it/grid/storm/health/HealthDirector.java b/src/main/java/it/grid/storm/health/HealthDirector.java index 00da66a2e..b955c1fca 100644 --- a/src/main/java/it/grid/storm/health/HealthDirector.java +++ b/src/main/java/it/grid/storm/health/HealthDirector.java @@ -4,7 +4,7 @@ */ package it.grid.storm.health; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.logging.StoRMLoggers; import java.text.SimpleDateFormat; @@ -14,82 +14,99 @@ public class HealthDirector { - public static final Logger LOGGER = StoRMLoggers.getHBLogger(); + public static final Logger LOGGER = StoRMLoggers.getHBLogger(); public static final Logger HEARTLOG = StoRMLoggers.getHBLogger(); private static final Logger BOOKKEEPING = StoRMLoggers.getBKLogger(); private static final Logger PERFLOG = StoRMLoggers.getPerfLogger(); private static boolean initialized = false; - private static HealthMonitor healthMonitorIstance = null; - private static boolean bookKeepingConfigured = false; - private static boolean bookKeepingEnabled = false; + private static boolean bookKeepingEnabled; - private static boolean performanceMonitorConfigured = false; private static boolean performanceMonitorEnabled = false; private static long bornInstant = -1L; 
private static String bornInstantStr = null; - public static int timeToLiveLogEventInSec = Configuration.getInstance() - .getPerformanceLogbookTimeInterval(); - - /** - * - * @param testingMode - * boolean - */ - public static void initializeDirector(boolean testingMode) { - - // configureHealthLog(testingMode); - - bookKeepingEnabled = Configuration.getInstance().getBookKeepingEnabled(); - if (bookKeepingEnabled) { - // configureBookKeeping(testingMode); - bookKeepingConfigured = true; - } - - int statusPeriod = Configuration.getInstance().getHearthbeatPeriod(); - if (testingMode) { - statusPeriod = 5; - } - - // Record the born of StoRM instance - bornInstant = System.currentTimeMillis(); - Date date = new Date(bornInstant); - SimpleDateFormat formatter = new SimpleDateFormat("yyyy.MM.dd HH.mm.ss"); - bornInstantStr = formatter.format(date); - - healthMonitorIstance = new HealthMonitor(1, statusPeriod); // Start after 1 - // sec - - // Setting performance rate - performanceMonitorEnabled = Configuration.getInstance() - .getPerformanceMeasuring(); - if (performanceMonitorEnabled) { - // configurePerformanceMonitor(testingMode); - int glanceTimeInterval = Configuration.getInstance() - .getPerformanceGlanceTimeInterval(); - - LOGGER.debug("----- Performance GLANCE Time Interval = " - + glanceTimeInterval); - LOGGER.debug("----- Performance LOGBOOK Time Interval = " - + timeToLiveLogEventInSec); - - healthMonitorIstance.initializePerformanceMonitor( - timeToLiveLogEventInSec, glanceTimeInterval); - - } - - initialized = true; - - } + public static int timeToLiveLogEventInSec; + +// +// public static void initializeDirector(Configuration config) { +// +// bookKeepingEnabled = config.isHearthbeatBookkeepingEnabled(); +// timeToLiveLogEventInSec = config.getPerformanceLogbookTimeInterval(); +// +// int statusPeriod = Configuration.getInstance().getHearthbeatPeriod(); +// +// bornInstant = System.currentTimeMillis(); +// healthMonitorIstance = new HealthMonitor(1, 
statusPeriod); +// +// // Setting performance rate +// performanceMonitorEnabled = Configuration.getInstance().isHearthbeatPerformanceMeasuringEnabled(); +// if (performanceMonitorEnabled) { +// int glanceTimeInterval = Configuration.getInstance().getHearthbeatPerformanceGlanceTimeInterval(); +// +// LOGGER.debug("----- Performance GLANCE Time Interval = {}", glanceTimeInterval); +// LOGGER.debug("----- Performance LOGBOOK Time Interval = {}", timeToLiveLogEventInSec); +// +// healthMonitorIstance.initializePerformanceMonitor(timeToLiveLogEventInSec, +// glanceTimeInterval); +// +// } +// +// initialized = true; +// +// } +// +// +// public static void initializeDirector() { +// +// // configureHealthLog(testingMode); +// +// bookKeepingEnabled = Configuration.getInstance().getBookKeepingEnabled(); +// if (bookKeepingEnabled) { +// // configureBookKeeping(testingMode); +// bookKeepingConfigured = true; +// } +// +// int statusPeriod = Configuration.getInstance().getHearthbeatPeriod(); +// if (testingMode) { +// statusPeriod = 5; +// } +// +// // Record the born of StoRM instance +// bornInstant = System.currentTimeMillis(); +// Date date = new Date(bornInstant); +// SimpleDateFormat formatter = new SimpleDateFormat("yyyy.MM.dd HH.mm.ss"); +// bornInstantStr = formatter.format(date); +// +// healthMonitorIstance = new HealthMonitor(1, statusPeriod); // Start after 1 +// // sec +// +// // Setting performance rate +// performanceMonitorEnabled = Configuration.getInstance() +// .getPerformanceMeasuring(); +// if (performanceMonitorEnabled) { +// // configurePerformanceMonitor(testingMode); +// int glanceTimeInterval = Configuration.getInstance() +// .getPerformanceGlanceTimeInterval(); +// +// LOGGER.debug("----- Performance GLANCE Time Interval = " +// + glanceTimeInterval); +// LOGGER.debug("----- Performance LOGBOOK Time Interval = " +// + timeToLiveLogEventInSec); +// +// healthMonitorIstance.initializePerformanceMonitor( +// timeToLiveLogEventInSec, 
glanceTimeInterval); +// +// } +// +// initialized = true; +// +// } private static String getHealthPatternLayout() { - /** - * @todo : Retrieve Patter Layout from Configuration .. - */ String pattern = "[%d{ISO8601}]: %m%n"; return pattern; } @@ -154,89 +171,52 @@ public static Logger getPerformanceLogger() { return PERFLOG; } - public static boolean isBookKeepingConfigured() { - - return bookKeepingConfigured; - } - public static boolean isBookKeepingEnabled() { return bookKeepingEnabled; } - public static boolean isPerformanceMonitorConfigured() { - - return performanceMonitorConfigured; - } - public static boolean isPerformanceMonitorEnabled() { return performanceMonitorEnabled; } - /** - * - * @return Logger - */ public static Logger getBookKeepingLogger() { return BOOKKEEPING; } - /** - * - * @return Namespace - */ - public static HealthMonitor getHealthMonitor() { - - if (!(initialized)) { - initializeDirector(false); - } - return healthMonitorIstance; - } - - /** - * - * @return Namespace - */ - public static HealthMonitor getHealthMonitor(boolean testingMode) { - - if (!(initialized)) { - initializeDirector(testingMode); - } - return healthMonitorIstance; - } - - public static long getBornInstant(boolean testingMode) { - if (!(initialized)) { - initializeDirector(testingMode); - } - return bornInstant; - } - - public static String getBornInstantStr(boolean testingMode) { - - if (!(initialized)) { - initializeDirector(testingMode); - } - return bornInstantStr; - } - - public static long getBornInstant() { - - if (!(initialized)) { - initializeDirector(false); - } - return bornInstant; - } - - public static String getBornInstantStr() { - - if (!(initialized)) { - initializeDirector(false); - } - return bornInstantStr; - } +// public static long getBornInstant(boolean testingMode) { +// +// if (!(initialized)) { +// initializeDirector(testingMode); +// } +// return bornInstant; +// } +// +// public static String getBornInstantStr(boolean testingMode) { +// 
+// if (!(initialized)) { +// initializeDirector(testingMode); +// } +// return bornInstantStr; +// } +// +// public static long getBornInstant() { +// +// if (!(initialized)) { +// initializeDirector(false); +// } +// return bornInstant; +// } +// +// public static String getBornInstantStr() { +// +// if (!(initialized)) { +// initializeDirector(false); +// } +// return bornInstantStr; +// } } diff --git a/src/main/java/it/grid/storm/health/HealthMonitor.java b/src/main/java/it/grid/storm/health/HealthMonitor.java index 2d5ca0472..12e944e8c 100644 --- a/src/main/java/it/grid/storm/health/HealthMonitor.java +++ b/src/main/java/it/grid/storm/health/HealthMonitor.java @@ -4,86 +4,109 @@ */ package it.grid.storm.health; -import java.util.ArrayList; import java.util.Hashtable; +import java.util.List; import java.util.Timer; import org.slf4j.Logger; +import com.google.common.collect.Lists; + +import it.grid.storm.config.StormConfiguration; + public class HealthMonitor { - private Logger HEARTLOG = HealthDirector.HEARTLOG; - private Logger PERFLOG = HealthDirector.getPerformanceLogger(); + private static HealthMonitor instance = null; + + private Logger HEARTLOG = HealthDirector.HEARTLOG; + private Logger PERFLOG = HealthDirector.getPerformanceLogger(); + + private Timer healthTimer; + private Hashtable bookKeepers; + + private long period; + private long bornInstant; + + public synchronized static HealthMonitor getInstance() { + + if (instance == null) { + instance = new HealthMonitor(StormConfiguration.getInstance()); + } + return instance; + } + + public static void init() { + instance = new HealthMonitor(StormConfiguration.getInstance()); + } + + private HealthMonitor(StormConfiguration config) { - private Timer healthTimer = null; - private Hashtable bookKeepers; + healthTimer = new Timer(); + bookKeepers = new Hashtable(); - public static int perfGlanceTimeInterval = 15; // 15 sec + if (!config.isHearthbeatBookkeepingEnabled() && 
!config.isHearthbeatPerformanceMeasuringEnabled()) { + return; + } - public HealthMonitor(int delay, int period) { + if (config.isHearthbeatPerformanceMeasuringEnabled()) { - healthTimer = new Timer(); - this.heartbeat(delay * 1000, period * 1000); + int logTimeInterval = config.getHearthbeatPerformanceLogbookTimeInterval(); + int defaultGlangeTimeInterval= config.getHearthbeatPerformanceGlanceTimeInterval(); - // Create the Book Keepers - bookKeepers = new Hashtable(); + if (defaultGlangeTimeInterval > logTimeInterval) { + HealthDirector.getPerformanceLogger() + .warn("WARNING: Log Book has the time interval lower than Glance time interval!"); + } + PerformanceBookKeeper pbk = + new PerformanceBookKeeper(logTimeInterval, defaultGlangeTimeInterval); + period = pbk.getGlanceWindowInMSec(); + healthTimer.scheduleAtFixedRate(new PerformancePulse(), 0, period); + PERFLOG.info("Set PERFORMANCE MONITOR in Timer Task (PERIOD:{})", period); + bookKeepers.put(PerformanceBookKeeper.KEY, pbk); + PERFLOG.info("--- PERFORMANCE MONITOR Initialized"); - // Add the Simple BookKeeper - bookKeepers.put(SimpleBookKeeper.KEY, new SimpleBookKeeper()); + } + + if (config.isHearthbeatPerformanceMeasuringEnabled()) { - HEARTLOG.info("HEART MONITOR Initialized"); - } + period = config.getHearthbeatPeriod(); + healthTimer.scheduleAtFixedRate(new Hearthbeat(), 1000L, period * 1000L); + HEARTLOG.info("Set HEARTHBEAT in Timer Task (DELAY: {}, PERIOD: {})", 1000L, period); + bookKeepers.put(SimpleBookKeeper.KEY, new SimpleBookKeeper()); + HEARTLOG.info("HEART MONITOR Initialized"); + } - public void initializePerformanceMonitor(int logTimeInterval, - int defaultGlangeTimeInterval) { + bornInstant = System.currentTimeMillis(); + } - if (defaultGlangeTimeInterval > logTimeInterval) { - HealthDirector.getPerformanceLogger().warn( - "WARNING: Log Book has the time " - + "interval lower than Glance time interval!"); - } - // Add the Performance BookKeeper - PerformanceBookKeeper pbk = new 
PerformanceBookKeeper(logTimeInterval, - defaultGlangeTimeInterval); - bookKeepers.put(PerformanceBookKeeper.KEY, pbk); + public List getBookKeepers() { - long pulseTimeInterval = pbk.getGlanceWindowInMSec(); - // this.perfEnabled = true; - healthTimer.scheduleAtFixedRate(new PerformancePulse(), 0, - pulseTimeInterval); - PERFLOG.info("Set PERFORMANCE MONITOR in Timer Task (PERIOD:{})", - perfGlanceTimeInterval); - - PERFLOG.info("--- PERFORMANCE MONITOR Initialized"); - } + return Lists.newArrayList(bookKeepers.values()); + } - public ArrayList getBookKeepers() { + public PerformanceBookKeeper getPerformanceBookKeeper() { - return new ArrayList(bookKeepers.values()); - } + if (bookKeepers.containsKey(PerformanceBookKeeper.KEY)) { + return (PerformanceBookKeeper) bookKeepers.get(PerformanceBookKeeper.KEY); + } + return null; + } - public PerformanceBookKeeper getPerformanceBookKeeper() { + public SimpleBookKeeper getSimpleBookKeeper() { - if (bookKeepers.containsKey(PerformanceBookKeeper.KEY)) { - return (PerformanceBookKeeper) bookKeepers.get(PerformanceBookKeeper.KEY); - } else { - return null; - } - } + if (bookKeepers.containsKey(SimpleBookKeeper.KEY)) { + return (SimpleBookKeeper) bookKeepers.get(SimpleBookKeeper.KEY); + } + return null; + } - public SimpleBookKeeper getSimpleBookKeeper() { + public long getPeriod() { - if (bookKeepers.containsKey(SimpleBookKeeper.KEY)) { - return (SimpleBookKeeper) bookKeepers.get(SimpleBookKeeper.KEY); - } else { - return null; - } - } + return period; + } - public void heartbeat(int delay, int period) { + public long getBornInstant() { - healthTimer.scheduleAtFixedRate(new Hearthbeat(), delay, period); - HEARTLOG.info("Set HEARTHBEAT in Timer Task (DELAY: {}, PERIOD: {})", delay, - period); - } + return bornInstant; + } } diff --git a/src/main/java/it/grid/storm/health/PerformanceGlance.java b/src/main/java/it/grid/storm/health/PerformanceGlance.java index aca3dc379..6a5ecf1c7 100644 --- 
a/src/main/java/it/grid/storm/health/PerformanceGlance.java +++ b/src/main/java/it/grid/storm/health/PerformanceGlance.java @@ -9,39 +9,25 @@ import java.util.ArrayList; -import org.slf4j.Logger; - -/** - * @author zappi - * - */ public class PerformanceGlance { - private static Logger PERFLOG = HealthDirector.getPerformanceLogger(); - - /** - * - * @return StoRMStatus - */ - public PerformanceStatus haveaLook() { + public PerformanceStatus haveaLook() { - HealthDirector.LOGGER.debug("Having a look.."); - PerformanceStatus performanceStatus = null; + HealthDirector.LOGGER.debug("Having a look.."); + PerformanceStatus performanceStatus = null; - PerformanceBookKeeper pbk = HealthDirector.getHealthMonitor() - .getPerformanceBookKeeper(); + PerformanceBookKeeper pbk = HealthMonitor.getInstance().getPerformanceBookKeeper(); - if (pbk != null) { - performanceStatus = pbk.getPerformanceStatus(); - ArrayList zombies = pbk.removeZombieEvents(); - HealthDirector.LOGGER - .debug("Removed # <{}> zombies.", zombies.size()); + if (pbk != null) { + performanceStatus = pbk.getPerformanceStatus(); + ArrayList zombies = pbk.removeZombieEvents(); + HealthDirector.LOGGER.debug("Removed # <{}> zombies.", zombies.size()); - HealthDirector.LOGGER.debug("have a look : {}", performanceStatus); - } + HealthDirector.LOGGER.debug("have a look : {}", performanceStatus); + } - HealthDirector.LOGGER.debug(".. glance ended."); - return performanceStatus; - } + HealthDirector.LOGGER.debug(".. 
glance ended."); + return performanceStatus; + } } diff --git a/src/main/java/it/grid/storm/health/PerformanceStatus.java b/src/main/java/it/grid/storm/health/PerformanceStatus.java index cb5cda3c3..779fb7716 100644 --- a/src/main/java/it/grid/storm/health/PerformanceStatus.java +++ b/src/main/java/it/grid/storm/health/PerformanceStatus.java @@ -12,72 +12,62 @@ import org.slf4j.Logger; -/** - * @author zappi - * - */ public class PerformanceStatus { - private Logger PERF_LOG = HealthDirector.getPerformanceLogger(); + private Logger PERF_LOG = HealthDirector.getPerformanceLogger(); - private String pulseNumberStr = ""; - private Hashtable perfStatus = new Hashtable(); - private static int timeWindows = HealthDirector.getHealthMonitor().perfGlanceTimeInterval; + private String pulseNumberStr = ""; + private Hashtable perfStatus = + new Hashtable(); + private static long timeWindows = HealthMonitor.getInstance().getPeriod(); - public PerformanceStatus(ArrayList eventToAnalyze) { + public PerformanceStatus(ArrayList eventToAnalyze) { - PERF_LOG.debug("PERFORMANCE STATUS"); - PerformanceEvent pEvent; - OperationType ot; - if (eventToAnalyze != null) { - PERF_LOG.debug("PERFORMANCE STATUS : {}", - eventToAnalyze.size()); - for (LogEvent event : eventToAnalyze) { - ot = event.getOperationType(); - if (perfStatus.containsKey(ot)) { - pEvent = perfStatus.get(event.getOperationType()); - } else { - pEvent = new PerformanceEvent(ot); - } - pEvent.addLogEvent(event); - perfStatus.put(ot, pEvent); - } - } else { - PERF_LOG.debug("NO EVENTS TO ANALYZE!!!"); - } - } + PERF_LOG.debug("PERFORMANCE STATUS"); + PerformanceEvent pEvent; + OperationType ot; + if (eventToAnalyze != null) { + PERF_LOG.debug("PERFORMANCE STATUS : {}", eventToAnalyze.size()); + for (LogEvent event : eventToAnalyze) { + ot = event.getOperationType(); + if (perfStatus.containsKey(ot)) { + pEvent = perfStatus.get(event.getOperationType()); + } else { + pEvent = new PerformanceEvent(ot); + } + 
pEvent.addLogEvent(event); + perfStatus.put(ot, pEvent); + } + } else { + PERF_LOG.debug("NO EVENTS TO ANALYZE!!!"); + } + } - /** - * - * @param number - * long - */ - public void setPulseNumber(long number) { + public void setPulseNumber(long number) { - this.pulseNumberStr = number + ""; - String prefix = ""; - for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { - prefix += "."; - } - this.pulseNumberStr = prefix + this.pulseNumberStr; - } + this.pulseNumberStr = number + ""; + String prefix = ""; + for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { + prefix += "."; + } + this.pulseNumberStr = prefix + this.pulseNumberStr; + } - @Override - public String toString() { + @Override + public String toString() { - StringBuilder result = new StringBuilder(); - result.append("#" + this.pulseNumberStr + ": "); - if (perfStatus.isEmpty()) { - result.append("No activity in the last " + timeWindows + " seconds"); - } else { - result.append("\n=== last " + timeWindows + " seconds ===\n"); - for (PerformanceEvent pEvent : perfStatus.values()) { - result.append(pEvent); - result.append("\n"); - } - } - // result.append("\n"); - return result.toString(); - } + StringBuilder result = new StringBuilder(); + result.append("#" + this.pulseNumberStr + ": "); + if (perfStatus.isEmpty()) { + result.append("No activity in the last " + timeWindows + " seconds"); + } else { + result.append("\n=== last " + timeWindows + " seconds ===\n"); + for (PerformanceEvent pEvent : perfStatus.values()) { + result.append(pEvent); + result.append("\n"); + } + } + return result.toString(); + } } diff --git a/src/main/java/it/grid/storm/health/StoRMStatus.java b/src/main/java/it/grid/storm/health/StoRMStatus.java index 323fbb67b..fd02f9072 100644 --- a/src/main/java/it/grid/storm/health/StoRMStatus.java +++ b/src/main/java/it/grid/storm/health/StoRMStatus.java @@ -9,203 +9,191 @@ public class StoRMStatus { - private long heapSize = -1L; - private long heapMaxSize = -1L; - private long 
heapFreeSize = -1L; - private String pulseNumberStr = ""; - - private int ptgRequests = 0; - private int ptgSuccess = 0; - private long ptgMeansTime = -1L; - - private int ptpRequests = 0; - private int ptpSuccess = 0; - private long ptpMeansTime = -1L; - - private long lifetime = -1L; - private String lifetimeStr = ""; - - private long totPtGRequest = 0L; - private long totPtPRequest = 0L; - - private int synchRequest = 0; - - public StoRMStatus() { - - } - - /** - * - * @param heapSize - * long - */ - public void setHeapSize(long heapSize) { - - this.heapSize = heapSize; - } - - /** - * - * @param maxHeapSize - * long - */ - public void setMAXHeapSize(long maxHeapSize) { - - this.heapMaxSize = maxHeapSize; - } - - /** - * - * @param heapFreeSize - * long - */ - public void setHeapFreeSize(long heapFreeSize) { - - this.heapFreeSize = heapFreeSize; - } - - /** - * - * @return int - */ - public int getHeapFreePercentile() { - - int result = 100; - if (this.heapMaxSize > 0) { - double average = this.heapFreeSize / this.heapMaxSize * 100; - result = (int) average; - } - return result; - } - - /** - * - * @param number - * long - */ - public void setPulseNumber(long number) { - - this.pulseNumberStr = number + ""; - String prefix = ""; - for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { - prefix += "."; - } - this.pulseNumberStr = prefix + this.pulseNumberStr; - } - - /** - * - * @param synchRequest - * int - */ - public void setSynchRequest(int synchRequest) { - - this.synchRequest = synchRequest; - } - - /** - * - * @param ptgNumber - * int - */ - public void setPtGNumberRequests(int ptgNumber) { - - this.ptgRequests = ptgNumber; - } - - /** - * - * @param ptpSuccess - * int - */ - public void setPtGSuccessRequests(int ptgSuccess) { - - this.ptgSuccess = ptgSuccess; - } - - public void setTotalPtGRequest(long totPtG) { - - this.totPtGRequest = totPtG; - } - - public void setTotalPtPRequest(long totPtP) { - - this.totPtPRequest = totPtP; - } - - /** - * - * 
@param meanTime - * long - */ - public void setPtGMeanDuration(long meanTime) { - - this.ptgMeansTime = meanTime; - } - - /** - * - * @param ptpNumber - * int - */ - public void setPtPNumberRequests(int ptpNumber) { - - this.ptpRequests = ptpNumber; - } - - /** - * - * @param ptpSuccess - * int - */ - public void setPtPSuccessRequests(int ptpSuccess) { - - this.ptpSuccess = ptpSuccess; - } - - /** - * - * @param meanTime - * long - */ - public void setPtPMeanDuration(long meanTime) { - - this.ptpMeansTime = meanTime; - } - - public void calculateLifeTime() { - - long bornTime = HealthDirector.getBornInstant(); - long now = System.currentTimeMillis(); - this.lifetime = now - bornTime; - - Date date = new Date(this.lifetime); - SimpleDateFormat formatter = new SimpleDateFormat("mm.ss"); - String minsec = formatter.format(date); - long hours = this.lifetime / 3600000; - this.lifetimeStr = hours + ":" + minsec; - } - - /** - * - * @return String - */ - public String toString() { - - StringBuilder result = new StringBuilder(); - result.append(" [#" + this.pulseNumberStr + " lifetime=" + this.lifetimeStr - + "]"); - result.append(" Heap Free:" + this.heapFreeSize); - result.append(" SYNCH [" + this.synchRequest + "]"); - result.append(" ASynch [PTG:" + this.totPtGRequest); - result.append(" PTP:" + this.totPtPRequest + "]"); - result.append(" Last:( [#PTG=" + this.ptgRequests); - result.append(" OK=" + this.ptgSuccess); - result.append(" M.Dur.=" + this.ptgMeansTime + "]"); - result.append(" [#PTP=" + this.ptpRequests); - result.append(" OK=" + this.ptpSuccess); - result.append(" M.Dur.=" + this.ptpMeansTime + "] )"); - return result.toString(); - } + private long heapSize = -1L; + private long heapMaxSize = -1L; + private long heapFreeSize = -1L; + private String pulseNumberStr = ""; + + private int ptgRequests = 0; + private int ptgSuccess = 0; + private long ptgMeansTime = -1L; + + private int ptpRequests = 0; + private int ptpSuccess = 0; + private long ptpMeansTime 
= -1L; + + private long lifetime = -1L; + private String lifetimeStr = ""; + + private long totPtGRequest = 0L; + private long totPtPRequest = 0L; + + private int synchRequest = 0; + + public StoRMStatus() { + + } + + /** + * + * @param heapSize long + */ + public void setHeapSize(long heapSize) { + + this.heapSize = heapSize; + } + + /** + * + * @param maxHeapSize long + */ + public void setMAXHeapSize(long maxHeapSize) { + + this.heapMaxSize = maxHeapSize; + } + + /** + * + * @param heapFreeSize long + */ + public void setHeapFreeSize(long heapFreeSize) { + + this.heapFreeSize = heapFreeSize; + } + + /** + * + * @return int + */ + public int getHeapFreePercentile() { + + int result = 100; + if (this.heapMaxSize > 0) { + double average = this.heapFreeSize / this.heapMaxSize * 100; + result = (int) average; + } + return result; + } + + /** + * + * @param number long + */ + public void setPulseNumber(long number) { + + this.pulseNumberStr = number + ""; + String prefix = ""; + for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { + prefix += "."; + } + this.pulseNumberStr = prefix + this.pulseNumberStr; + } + + /** + * + * @param synchRequest int + */ + public void setSynchRequest(int synchRequest) { + + this.synchRequest = synchRequest; + } + + /** + * + * @param ptgNumber int + */ + public void setPtGNumberRequests(int ptgNumber) { + + this.ptgRequests = ptgNumber; + } + + /** + * + * @param ptpSuccess int + */ + public void setPtGSuccessRequests(int ptgSuccess) { + + this.ptgSuccess = ptgSuccess; + } + + public void setTotalPtGRequest(long totPtG) { + + this.totPtGRequest = totPtG; + } + + public void setTotalPtPRequest(long totPtP) { + + this.totPtPRequest = totPtP; + } + + /** + * + * @param meanTime long + */ + public void setPtGMeanDuration(long meanTime) { + + this.ptgMeansTime = meanTime; + } + + /** + * + * @param ptpNumber int + */ + public void setPtPNumberRequests(int ptpNumber) { + + this.ptpRequests = ptpNumber; + } + + /** + * + * @param 
ptpSuccess int + */ + public void setPtPSuccessRequests(int ptpSuccess) { + + this.ptpSuccess = ptpSuccess; + } + + /** + * + * @param meanTime long + */ + public void setPtPMeanDuration(long meanTime) { + + this.ptpMeansTime = meanTime; + } + + public void calculateLifeTime() { + + long bornTime = HealthMonitor.getInstance().getBornInstant(); + long now = System.currentTimeMillis(); + this.lifetime = now - bornTime; + + Date date = new Date(this.lifetime); + SimpleDateFormat formatter = new SimpleDateFormat("mm.ss"); + String minsec = formatter.format(date); + long hours = this.lifetime / 3600000; + this.lifetimeStr = hours + ":" + minsec; + } + + /** + * + * @return String + */ + public String toString() { + + StringBuilder result = new StringBuilder(); + result.append(" [#" + this.pulseNumberStr + " lifetime=" + this.lifetimeStr + "]"); + result.append(" Heap Free:" + this.heapFreeSize); + result.append(" SYNCH [" + this.synchRequest + "]"); + result.append(" ASynch [PTG:" + this.totPtGRequest); + result.append(" PTP:" + this.totPtPRequest + "]"); + result.append(" Last:( [#PTG=" + this.ptgRequests); + result.append(" OK=" + this.ptgSuccess); + result.append(" M.Dur.=" + this.ptgMeansTime + "]"); + result.append(" [#PTP=" + this.ptpRequests); + result.append(" OK=" + this.ptpSuccess); + result.append(" M.Dur.=" + this.ptpMeansTime + "] )"); + return result.toString(); + } } diff --git a/src/main/java/it/grid/storm/health/external/FSMetadataStatus.java b/src/main/java/it/grid/storm/health/external/FSMetadataStatus.java index f2b39d1b2..fe837cf8f 100644 --- a/src/main/java/it/grid/storm/health/external/FSMetadataStatus.java +++ b/src/main/java/it/grid/storm/health/external/FSMetadataStatus.java @@ -7,83 +7,65 @@ */ package it.grid.storm.health.external; -import it.grid.storm.health.HealthDirector; - import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.Enumeration; import java.util.Hashtable; -/** - * @author zappi 
- * - */ +import it.grid.storm.health.HealthMonitor; + public class FSMetadataStatus { - private String pulseNumberStr = ""; - private long lifetime = -1L; - private String lifetimeStr = ""; - private final int benchmarkCount = -1; - private final Hashtable pathName = new Hashtable(); + private String pulseNumberStr = ""; + private long lifetime = -1L; + private String lifetimeStr = ""; + private final Hashtable pathName = new Hashtable(); - /** - * - */ - public FSMetadataStatus(ArrayList storageAreasName) { + public FSMetadataStatus(ArrayList storageAreasName) { - super(); - pathName.put("Local", -1L); - for (Object element : storageAreasName) { - pathName.put((String) element, -1L); - } - } + super(); + pathName.put("Local", -1L); + for (Object element : storageAreasName) { + pathName.put((String) element, -1L); + } + } - /** - * - * @param number - * long - */ - public void setPulseNumber(long number) { + public void setPulseNumber(long number) { - this.pulseNumberStr = number + ""; - String prefix = ""; - for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { - prefix += "."; - } - this.pulseNumberStr = prefix + this.pulseNumberStr; - } + this.pulseNumberStr = number + ""; + String prefix = ""; + for (int i = 0; i < (6 - pulseNumberStr.length()); i++) { + prefix += "."; + } + this.pulseNumberStr = prefix + this.pulseNumberStr; + } - public void calculateLifeTime() { + public void calculateLifeTime() { - long bornTime = HealthDirector.getBornInstant(); - long now = System.currentTimeMillis(); - this.lifetime = now - bornTime; + long bornTime = HealthMonitor.getInstance().getBornInstant(); + long now = System.currentTimeMillis(); + this.lifetime = now - bornTime; - Date date = new Date(this.lifetime); - SimpleDateFormat formatter = new SimpleDateFormat("mm.ss"); - String minsec = formatter.format(date); - long hours = this.lifetime / 3600000; - this.lifetimeStr = hours + ":" + minsec; - } + Date date = new Date(this.lifetime); + SimpleDateFormat formatter = 
new SimpleDateFormat("mm.ss"); + String minsec = formatter.format(date); + long hours = this.lifetime / 3600000; + this.lifetimeStr = hours + ":" + minsec; + } - /** - * - * @return String - */ - @Override - public String toString() { + @Override + public String toString() { - StringBuilder result = new StringBuilder(); - result.append(" [#" + this.pulseNumberStr + " lifetime=" + this.lifetimeStr - + "]"); - Enumeration sas = pathName.keys(); - while (sas.hasMoreElements()) { - String sa = sas.nextElement(); - Long average = pathName.get(sa); - result.append("SA('" + sa + "')=" + average); - } - return result.toString(); - } + StringBuilder result = new StringBuilder(); + result.append(" [#" + this.pulseNumberStr + " lifetime=" + this.lifetimeStr + "]"); + Enumeration sas = pathName.keys(); + while (sas.hasMoreElements()) { + String sa = sas.nextElement(); + Long average = pathName.get(sa); + result.append("SA('" + sa + "')=" + average); + } + return result.toString(); + } } diff --git a/src/main/java/it/grid/storm/info/SpaceInfoManager.java b/src/main/java/it/grid/storm/info/SpaceInfoManager.java index e23ad360e..364d00ab3 100644 --- a/src/main/java/it/grid/storm/info/SpaceInfoManager.java +++ b/src/main/java/it/grid/storm/info/SpaceInfoManager.java @@ -4,7 +4,7 @@ */ package it.grid.storm.info; -import static it.grid.storm.config.Configuration.DISKUSAGE_SERVICE_ENABLED; +import static it.grid.storm.config.StormConfiguration.DISKUSAGE_SERVICE_ENABLED_KEY; import java.io.FileNotFoundException; import java.util.List; @@ -16,10 +16,8 @@ import com.google.common.collect.Lists; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceInterface; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.VirtualFS; import 
it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.space.StorageSpaceData; @@ -34,14 +32,14 @@ public class SpaceInfoManager { private static final SpaceInfoManager instance = new SpaceInfoManager(); private static final String USED_SPACE_INI_FILEPATH = - Configuration.getInstance().configurationDir() + "/used-space.ini".replaceAll("/+", "/"); + StormConfiguration.getInstance().configurationDir() + "/used-space.ini".replaceAll("/+", "/"); private static final Logger log = LoggerFactory.getLogger(SpaceInfoManager.class); // Reference to the Catalog - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); + private final ReservedSpaceCatalog spaceCatalog = ReservedSpaceCatalog.getInstance(); // Reference to the NamespaceDirector - private final NamespaceInterface namespace = NamespaceDirector.getNamespace(); + private final Namespace namespace = Namespace.getInstance(); private SpaceInfoManager() {} @@ -71,14 +69,14 @@ public void initializeUsedSpace() { return; } - if (Configuration.getInstance().getDiskUsageServiceEnabled()) { + if (StormConfiguration.getInstance().getDiskUsageServiceEnabled()) { log.info("The remaining {} storage spaces will be initialized by DiskUsage service", ssni.size()); } else { log.warn( "The remaining {} storage spaces WON'T be initialized with DUs. 
" + "Please enable DiskUsage service by setting '{}' as true.", - ssni.size(), DISKUSAGE_SERVICE_ENABLED); + ssni.size(), DISKUSAGE_SERVICE_ENABLED_KEY); } } @@ -142,7 +140,7 @@ private void updateUsedSpaceOnPersistence(SaUsedSize usedSize) { if (ssd != null) { try { - ssd.setUsedSpaceSize(TSizeInBytes.make(usedSize.getUsedSize(), SizeUnit.BYTES)); + ssd.setUsedSpaceSize(TSizeInBytes.make(usedSize.getUsedSize())); spaceCatalog.updateStorageSpace(ssd); log.debug("StorageSpace table updated for SA: '{}' with used size = {}", usedSize.getSaName(), usedSize.getUsedSize()); diff --git a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java index 3f55223d9..7e2cd2d08 100644 --- a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java +++ b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java @@ -13,7 +13,6 @@ import com.google.common.base.Preconditions; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.common.types.SizeUnit; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.space.DUResult; @@ -25,11 +24,12 @@ public class DiskUsageTask implements Runnable { private static final Logger log = LoggerFactory.getLogger(DiskUsageTask.class); - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); + private final ReservedSpaceCatalog catalog; private VirtualFS vfs; public DiskUsageTask(VirtualFS vfs) { this.vfs = vfs; + this.catalog = ReservedSpaceCatalog.getInstance(); } @Override @@ -61,7 +61,7 @@ private void updateUsedSpaceOnPersistence(String spaceToken, DUResult duResult) Preconditions.checkNotNull(spaceToken, "Received null spaceToken!"); Preconditions.checkNotNull(duResult, "Received null duResult!"); - StorageSpaceData ssd = spaceCatalog.getStorageSpaceByAlias(spaceToken); + StorageSpaceData ssd = catalog.getStorageSpaceByAlias(spaceToken); if (ssd == null) { 
failPersistence(spaceToken, "Unable to retrieve StorageSpaceData"); @@ -70,8 +70,8 @@ private void updateUsedSpaceOnPersistence(String spaceToken, DUResult duResult) try { - ssd.setUsedSpaceSize(TSizeInBytes.make(duResult.getSizeInBytes(), SizeUnit.BYTES)); - spaceCatalog.updateStorageSpace(ssd); + ssd.setUsedSpaceSize(TSizeInBytes.make(duResult.getSizeInBytes())); + catalog.updateStorageSpace(ssd); log.debug("StorageSpace table updated for SA: '{}' with used size = {}", spaceToken, duResult.getSizeInBytes()); diff --git a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java index 9c73c85d7..b9c02491c 100644 --- a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java +++ b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java @@ -21,287 +21,270 @@ public class SpaceStatusSummary { - protected final String saAlias; - /** defined in config/db (static value) **/ - protected final long totalSpace; - /** defined in config/db (static value) **/ - // published by DIP - - protected long usedSpace = -1; - /** info retrieved by sensors **/ - // published by DIP - protected long unavailableSpace = -1; - /** info retrieved by sensors **/ - protected long reservedSpace = -1; - /** info retrieved from DB **/ - // published by DIP SETTED TO ZERO BECAUSE CURRENTLY RETURN FAKE VALUES - // For now do not consider the reserved space, a better management is needed - - private static final ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - private static final Logger log = LoggerFactory - .getLogger(SpaceStatusSummary.class); - - /***************************** - * Constructors - */ - - /** - * @param saAlias - * @param totalSpace - * @throws IllegalArgumentException - */ - public SpaceStatusSummary(String saAlias, long totalSpace) - throws IllegalArgumentException { - - if (totalSpace < 0 || saAlias == null) { - log - .error("Unable to create SpaceStatusSummary. 
Received illegal parameter: saAlias: " - + saAlias + " totalSpace: " + totalSpace); - throw new IllegalArgumentException( - "Unable to create SpaceStatusSummary. Received illegal parameter"); - } - this.saAlias = saAlias; - this.totalSpace = totalSpace; - } - - private SpaceStatusSummary(String saAlias, long usedSpace, - long unavailableSpace, long reservedSpace, long totalSpace) { - - this.saAlias = saAlias; - this.usedSpace = usedSpace; - this.unavailableSpace = unavailableSpace; - this.reservedSpace = reservedSpace; - this.totalSpace = totalSpace; - } - - /** - * Produce a SpaceStatusSummary with fields matching exactly the ones - * available on the database - * - * @param saAlias - * @return - * @throws IllegalArgumentException - */ - public static SpaceStatusSummary createFromDB(String saAlias) - throws IllegalArgumentException { - - StorageSpaceData storageSpaceData = catalog.getStorageSpaceByAlias(saAlias); - if (storageSpaceData == null) { - throw new IllegalArgumentException( - "Unable to find a storage space row for alias \'" + saAlias - + "\' from storm Database"); - } else { - if (!storageSpaceData.isInitialized()) { - log - .warn("Building the SpaceStatusSummary from non initialized space with alias \'" - + saAlias + "\'"); - } - SpaceStatusSummary summary = new SpaceStatusSummary(saAlias, - storageSpaceData.getUsedSpaceSize().value(), storageSpaceData - .getUnavailableSpaceSize().value(), storageSpaceData - .getReservedSpaceSize().value(), storageSpaceData.getTotalSpaceSize() - .value()); - return summary; - } - } - - /***************************** - * GETTER methods - ****************************/ - - /** - * @return the saAlias - */ - public String getSaAlias() { - - return saAlias; - } - - /** - * busySpace = used + unavailable + reserved - * - * @return the busySpace - */ - public long getBusySpace() { - - return this.usedSpace + this.reservedSpace + this.unavailableSpace; - } - - /** - * availableSpace = totalSpace - busySpace - * - * @return - 
*/ - public long getAvailableSpace() { - - return this.totalSpace - this.getBusySpace(); - } - - /** - * @return the usedSpace - */ - public long getUsedSpace() { - - return usedSpace; - } - - /** - * @return the unavailableSpace - */ - public long getUnavailableSpace() { - - return unavailableSpace; - } - - /** - * @return the reservedSpace - */ - public long getReservedSpace() { - - return reservedSpace; - } - - /** - * @return the totalSpace - */ - public long getTotalSpace() { - - return totalSpace; - } - - /** - * Real One freeSpace = totalSpace - used - reserved For now... freeSpace = - * totalSpace - used - * - * @return the freeSpace - */ - public long getFreeSpace() { - - if (this.totalSpace >= 0) { - // For now do not consider the reserved space, a better management is - // needed - // this.freeSpace = this.totalSpace - this.usedSpace - this.reservedSpace; - return this.totalSpace - this.usedSpace; - } else { - return -1; - } - } - - /***************************** - * SETTER methods - ****************************/ - - /** - * @param usedSpace - * the usedSpace to set - */ - public void setUsedSpace(long usedSpace) { - - this.usedSpace = usedSpace; - } - - /** - * @param unavailableSpace - * the unavailableSpace to set - */ - public void setUnavailableSpace(long unavailableSpace) { - - this.unavailableSpace = unavailableSpace; - } - - /** - * @param reservedSpace - * the reservedSpace to set - */ - public void setReservedSpace(long reservedSpace) { - - this.reservedSpace = reservedSpace; - } - - /******************************* - * JSON Building - */ - - /** - * String saAlias; long busySpace; // busySpace = used + unavailable + - * reserved long usedSpace; //info retrieved by sensors long unavailableSpace; - * // info retrieved by sensors long reservedSpace; // info retrieved from DB - * long totalSpace; // defined in config/db (static value) long freeSpace; // - * freeSpace = totalSpace - used - reserved; - */ - public String getJsonFormat() { - - String 
result = ""; - StringWriter strWriter = new StringWriter(); - Configuration config = new Configuration(); - MappedNamespaceConvention con = new MappedNamespaceConvention(config); - - try { - AbstractXMLStreamWriter w = new MappedXMLStreamWriter(con, strWriter); - w.writeStartDocument(); - // start main element - w.writeStartElement("sa-status"); - // Alias - w.writeStartElement("alias"); - w.writeCharacters(this.getSaAlias()); - w.writeEndElement(); - // busy space - w.writeStartElement("busy-space"); - w.writeCharacters("" + this.getBusySpace()); - w.writeEndElement(); - // used space - w.writeStartElement("used-space"); - w.writeCharacters("" + this.getUsedSpace()); - w.writeEndElement(); - // unavailable space - w.writeStartElement("unavailable-space"); - w.writeCharacters("" + this.getUnavailableSpace()); - w.writeEndElement(); - // reserved space - w.writeStartElement("reserved-space"); - w.writeCharacters("" + this.getReservedSpace()); - w.writeEndElement(); - // total space - w.writeStartElement("total-space"); - w.writeCharacters("" + this.getTotalSpace()); - w.writeEndElement(); - // free space - w.writeStartElement("free-space"); - w.writeCharacters("" + this.getFreeSpace()); - w.writeEndElement(); - // available space - w.writeStartElement("available-space"); - w.writeCharacters("" + this.getAvailableSpace()); - w.writeEndElement(); - // end main element - w.writeEndElement(); - w.writeEndDocument(); - w.close(); - } catch (XMLStreamException e) { - log - .error("Unable to produce Json representation of the object. XMLStreamException: " - + e.getMessage()); - } - try { - strWriter.close(); - } catch (IOException e) { - log - .error("Unable to close the StringWriter for Json representation of the object. 
IOException: " - + e.getMessage()); - } - result = strWriter.toString(); - return result; - } - - @Override - public String toString() { - - return "SpaceStatusSummary [getSaAlias()=" + getSaAlias() - + ", getBusySpace()=" + getBusySpace() + ", getAvailableSpace()=" - + getAvailableSpace() + ", getUsedSpace()=" + getUsedSpace() - + ", getUnavailableSpace()=" + getUnavailableSpace() - + ", getReservedSpace()=" + getReservedSpace() + ", getTotalSpace()=" - + getTotalSpace() + ", getFreeSpace()=" + getFreeSpace() + "]"; - } + protected final String saAlias; + /** defined in config/db (static value) **/ + protected final long totalSpace; + /** defined in config/db (static value) **/ + // published by DIP + + protected long usedSpace = -1; + /** info retrieved by sensors **/ + // published by DIP + protected long unavailableSpace = -1; + /** info retrieved by sensors **/ + protected long reservedSpace = -1; + /** info retrieved from DB **/ + + private static final Logger log = LoggerFactory.getLogger(SpaceStatusSummary.class); + + /***************************** + * Constructors + */ + + /** + * @param saAlias + * @param totalSpace + * @throws IllegalArgumentException + */ + public SpaceStatusSummary(String saAlias, long totalSpace) throws IllegalArgumentException { + + if (totalSpace < 0 || saAlias == null) { + log.error("Unable to create SpaceStatusSummary. Received illegal parameter: saAlias: " + + saAlias + " totalSpace: " + totalSpace); + throw new IllegalArgumentException( + "Unable to create SpaceStatusSummary. 
Received illegal parameter"); + } + this.saAlias = saAlias; + this.totalSpace = totalSpace; + } + + private SpaceStatusSummary(String saAlias, long usedSpace, long unavailableSpace, + long reservedSpace, long totalSpace) { + + this.saAlias = saAlias; + this.usedSpace = usedSpace; + this.unavailableSpace = unavailableSpace; + this.reservedSpace = reservedSpace; + this.totalSpace = totalSpace; + } + + /** + * Produce a SpaceStatusSummary with fields matching exactly the ones available on the database + * + * @param saAlias + * @return + * @throws IllegalArgumentException + */ + public static SpaceStatusSummary createFromDB(String saAlias) throws IllegalArgumentException { + + StorageSpaceData storageSpaceData = + ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(saAlias); + if (storageSpaceData == null) { + throw new IllegalArgumentException( + "Unable to find a storage space row for alias \'" + saAlias + "\' from storm Database"); + } else { + if (!storageSpaceData.isInitialized()) { + log.warn("Building the SpaceStatusSummary from non initialized space with alias \'" + + saAlias + "\'"); + } + SpaceStatusSummary summary = + new SpaceStatusSummary(saAlias, storageSpaceData.getUsedSpaceSize().value(), + storageSpaceData.getUnavailableSpaceSize().value(), + storageSpaceData.getReservedSpaceSize().value(), + storageSpaceData.getTotalSpaceSize().value()); + return summary; + } + } + + /***************************** + * GETTER methods + ****************************/ + + /** + * @return the saAlias + */ + public String getSaAlias() { + + return saAlias; + } + + /** + * busySpace = used + unavailable + reserved + * + * @return the busySpace + */ + public long getBusySpace() { + + return this.usedSpace + this.reservedSpace + this.unavailableSpace; + } + + /** + * availableSpace = totalSpace - busySpace + * + * @return + */ + public long getAvailableSpace() { + + return this.totalSpace - this.getBusySpace(); + } + + /** + * @return the usedSpace + */ + public long 
getUsedSpace() { + + return usedSpace; + } + + /** + * @return the unavailableSpace + */ + public long getUnavailableSpace() { + + return unavailableSpace; + } + + /** + * @return the reservedSpace + */ + public long getReservedSpace() { + + return reservedSpace; + } + + /** + * @return the totalSpace + */ + public long getTotalSpace() { + + return totalSpace; + } + + /** + * Real One freeSpace = totalSpace - used - reserved For now... freeSpace = totalSpace - used + * + * @return the freeSpace + */ + public long getFreeSpace() { + + if (this.totalSpace >= 0) { + // For now do not consider the reserved space, a better management is + // needed + // this.freeSpace = this.totalSpace - this.usedSpace - this.reservedSpace; + return this.totalSpace - this.usedSpace; + } else { + return -1; + } + } + + /***************************** + * SETTER methods + ****************************/ + + /** + * @param usedSpace the usedSpace to set + */ + public void setUsedSpace(long usedSpace) { + + this.usedSpace = usedSpace; + } + + /** + * @param unavailableSpace the unavailableSpace to set + */ + public void setUnavailableSpace(long unavailableSpace) { + + this.unavailableSpace = unavailableSpace; + } + + /** + * @param reservedSpace the reservedSpace to set + */ + public void setReservedSpace(long reservedSpace) { + + this.reservedSpace = reservedSpace; + } + + /******************************* + * JSON Building + */ + + /** + * String saAlias; long busySpace; // busySpace = used + unavailable + reserved long usedSpace; + * //info retrieved by sensors long unavailableSpace; // info retrieved by sensors long + * reservedSpace; // info retrieved from DB long totalSpace; // defined in config/db (static + * value) long freeSpace; // freeSpace = totalSpace - used - reserved; + */ + public String getJsonFormat() { + + String result = ""; + StringWriter strWriter = new StringWriter(); + Configuration config = new Configuration(); + MappedNamespaceConvention con = new 
MappedNamespaceConvention(config); + + try { + AbstractXMLStreamWriter w = new MappedXMLStreamWriter(con, strWriter); + w.writeStartDocument(); + // start main element + w.writeStartElement("sa-status"); + // Alias + w.writeStartElement("alias"); + w.writeCharacters(this.getSaAlias()); + w.writeEndElement(); + // busy space + w.writeStartElement("busy-space"); + w.writeCharacters("" + this.getBusySpace()); + w.writeEndElement(); + // used space + w.writeStartElement("used-space"); + w.writeCharacters("" + this.getUsedSpace()); + w.writeEndElement(); + // unavailable space + w.writeStartElement("unavailable-space"); + w.writeCharacters("" + this.getUnavailableSpace()); + w.writeEndElement(); + // reserved space + w.writeStartElement("reserved-space"); + w.writeCharacters("" + this.getReservedSpace()); + w.writeEndElement(); + // total space + w.writeStartElement("total-space"); + w.writeCharacters("" + this.getTotalSpace()); + w.writeEndElement(); + // free space + w.writeStartElement("free-space"); + w.writeCharacters("" + this.getFreeSpace()); + w.writeEndElement(); + // available space + w.writeStartElement("available-space"); + w.writeCharacters("" + this.getAvailableSpace()); + w.writeEndElement(); + // end main element + w.writeEndElement(); + w.writeEndDocument(); + w.close(); + } catch (XMLStreamException e) { + log.error("Unable to produce Json representation of the object. XMLStreamException: " + + e.getMessage()); + } + try { + strWriter.close(); + } catch (IOException e) { + log.error( + "Unable to close the StringWriter for Json representation of the object. 
IOException: " + + e.getMessage()); + } + result = strWriter.toString(); + return result; + } + + @Override + public String toString() { + + return "SpaceStatusSummary [getSaAlias()=" + getSaAlias() + ", getBusySpace()=" + getBusySpace() + + ", getAvailableSpace()=" + getAvailableSpace() + ", getUsedSpace()=" + getUsedSpace() + + ", getUnavailableSpace()=" + getUnavailableSpace() + ", getReservedSpace()=" + + getReservedSpace() + ", getTotalSpace()=" + getTotalSpace() + ", getFreeSpace()=" + + getFreeSpace() + "]"; + } } diff --git a/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java b/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java new file mode 100644 index 000000000..dea9835e9 --- /dev/null +++ b/src/main/java/it/grid/storm/metrics/InstrumentedBasicDataSource.java @@ -0,0 +1,123 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.metrics; + +import static com.codahale.metrics.MetricRegistry.name; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.apache.commons.dbcp2.BasicDataSource; + +import com.codahale.metrics.Gauge; +import com.codahale.metrics.JmxReporter; +import com.codahale.metrics.MetricRegistry; +import com.codahale.metrics.RatioGauge; +import com.codahale.metrics.Timer; + +public class InstrumentedBasicDataSource extends BasicDataSource { + + private final Timer getConnectionTimer; + private final JmxReporter reporter; + + public InstrumentedBasicDataSource(String prefix, MetricRegistry registry) { + instrument(prefix, registry, this); + getConnectionTimer = registry.timer(name(prefix, "get-connection")); + reporter = JmxReporter.forRegistry(registry).build(); + reporter.start(); + } + + /** + * Instrument the given BasicDataSource instance with a series of timers and gauges. 
+ * + */ + public static void instrument(String prefix, MetricRegistry registry, + final BasicDataSource datasource) { + + registry.register(name(prefix, "initial-size"), new Gauge() { + public Integer getValue() { + return datasource.getInitialSize(); + } + }); + registry.register(name(prefix, "max-idle"), new Gauge() { + public Integer getValue() { + return datasource.getMaxIdle(); + } + }); + registry.register(name(prefix, "max-open-prepared-statements"), new Gauge() { + public Integer getValue() { + return datasource.getMaxOpenPreparedStatements(); + } + }); + registry.register(name(prefix, "max-wait-millis"), new Gauge() { + public Long getValue() { + return datasource.getMaxWaitMillis(); + } + }); + registry.register(name(prefix, "min-evictable-idle-time-millis"), new Gauge() { + public Long getValue() { + return datasource.getMinEvictableIdleTimeMillis(); + } + }); + registry.register(name(prefix, "min-idle"), new Gauge() { + public Integer getValue() { + return datasource.getMinIdle(); + } + }); + registry.register(name(prefix, "num-active"), new Gauge() { + public Integer getValue() { + return datasource.getNumActive(); + } + }); + registry.register(name(prefix, "max-total"), new Gauge() { + public Integer getValue() { + return datasource.getMaxTotal(); + } + }); + registry.register(name(prefix, "num-idle"), new Gauge() { + public Integer getValue() { + return datasource.getNumIdle(); + } + }); + registry.register(name(prefix, "num-tests-per-eviction-run"), new Gauge() { + public Integer getValue() { + return datasource.getNumTestsPerEvictionRun(); + } + }); + registry.register(name(prefix, "time-between-eviction-runs-millis"), new Gauge() { + public Long getValue() { + return datasource.getTimeBetweenEvictionRunsMillis(); + } + }); + registry.register(name(prefix, "percent-idle"), new RatioGauge() { + @Override + protected Ratio getRatio() { + return Ratio.of(datasource.getNumIdle(), datasource.getMaxIdle()); + } + }); + registry.register(name(prefix, 
"percent-active"), new RatioGauge() { + @Override + protected Ratio getRatio() { + return Ratio.of(datasource.getNumActive(), datasource.getMaxTotal()); + } + }); + } + + @Override + public Connection getConnection() throws SQLException { + final Timer.Context ctx = getConnectionTimer.time(); + try { + return super.getConnection(); + } finally { + ctx.stop(); + } + } + + @Override + public synchronized void close() throws SQLException { + super.close(); + reporter.stop(); + } +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/namespace/Namespace.java b/src/main/java/it/grid/storm/namespace/Namespace.java index a6ae3869b..cddfc309f 100644 --- a/src/main/java/it/grid/storm/namespace/Namespace.java +++ b/src/main/java/it/grid/storm/namespace/Namespace.java @@ -16,7 +16,13 @@ import java.util.UUID; import java.util.stream.Collectors; +import javax.xml.parsers.ParserConfigurationException; + +import org.apache.commons.configuration.ConfigurationException; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.w3c.dom.DOMException; +import org.xml.sax.SAXException; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; @@ -30,7 +36,10 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.namespace.config.NamespaceLoader; import it.grid.storm.namespace.config.NamespaceParser; +import it.grid.storm.namespace.config.xml.XMLNamespaceLoader; +import it.grid.storm.namespace.config.xml.XMLNamespaceParser; import it.grid.storm.namespace.model.ApproachableRule; import it.grid.storm.namespace.model.Capability; import it.grid.storm.namespace.model.MappingRule; @@ -44,14 +53,33 @@ import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceToken; -public class Namespace implements NamespaceInterface { +public class Namespace { + + private static Namespace instance = null; + + 
private static final Logger log = LoggerFactory.getLogger(Namespace.class); private static final String SPACE_FILE_NAME_SUFFIX = ".space"; private static final char SPACE_FILE_NAME_SEPARATOR = '_'; - private final Logger log = NamespaceDirector.getLogger(); + private final NamespaceParser parser; - public Namespace(NamespaceParser parser) { + public static void init(String namespaceFilePath, boolean semanticCheckEnabled) + throws DOMException, ConfigurationException, ParserConfigurationException, SAXException, + IOException, NamespaceException { + + log.info("Initializing Namespace from {} ...", namespaceFilePath); + NamespaceLoader loader = new XMLNamespaceLoader(namespaceFilePath); + + instance = new Namespace(new XMLNamespaceParser(loader, semanticCheckEnabled)); + + } + + public static Namespace getInstance() { + return instance; + } + + private Namespace(NamespaceParser parser) { this.parser = parser; } @@ -61,19 +89,16 @@ public String getNamespaceVersion() throws NamespaceException { return parser.getNamespaceVersion(); } - @Override public List getAllDefinedVFS() { return parser.getVFSs().values().stream().collect(Collectors.toList()); } - @Override public Map getAllDefinedVFSAsDictionary() { return parser.getMapVFS_Root(); } - @Override public List getAllDefinedMappingRules() { return parser.getMappingRules().values().stream().collect(Collectors.toList()); @@ -91,7 +116,6 @@ public List getApproachableVFS(GridUserInterface user) { return approachVFS; } - @Override public List getApproachableByAnonymousVFS() throws NamespaceException { List anonymousVFS = Lists.newLinkedList(); @@ -106,7 +130,6 @@ public List getApproachableByAnonymousVFS() throws NamespaceException return anonymousVFS; } - @Override public List getReadableByAnonymousVFS() throws NamespaceException { List readableVFS = Lists.newLinkedList(); @@ -121,9 +144,7 @@ public List getReadableByAnonymousVFS() throws NamespaceException { return readableVFS; } - @Override - public List 
getReadableOrApproachableByAnonymousVFS() - throws NamespaceException { + public List getReadableOrApproachableByAnonymousVFS() throws NamespaceException { List rowVFS = Lists.newLinkedList(); List allVFS = Lists.newLinkedList(getAllDefinedVFS()); @@ -374,8 +395,7 @@ public VirtualFS resolveVFSbyRoot(String absolutePath) throws NamespaceException return getWinnerVFS(absolutePath, parser.getMapVFS_Root()); } - public VirtualFS resolveVFSbyAbsolutePath(String absolutePath) - throws NamespaceException { + public VirtualFS resolveVFSbyAbsolutePath(String absolutePath) throws NamespaceException { return getWinnerVFS(absolutePath, parser.getMapVFS_Root()); } @@ -540,8 +560,7 @@ public SortedSet getApproachableRules(GridUserInterface user) * @param appRule ApproachableRule * @return VirtualFS */ - public VirtualFS getApproachableDefaultVFS(ApproachableRule appRule) - throws NamespaceException { + public VirtualFS getApproachableDefaultVFS(ApproachableRule appRule) throws NamespaceException { VirtualFS defaultVFS = null; String defaultVFSName = null; @@ -579,8 +598,7 @@ private static boolean matchSubject(ApproachableRule approachableRule, GridUserI return result; } - public VirtualFS resolveVFSbySpaceToken(TSpaceToken spaceToken) - throws NamespaceException { + public VirtualFS resolveVFSbySpaceToken(TSpaceToken spaceToken) throws NamespaceException { Optional vfs = getAllDefinedVFS().stream().filter(v -> spaceToken.equals(v.getSpaceToken())).findFirst(); diff --git a/src/main/java/it/grid/storm/namespace/NamespaceDirector.java b/src/main/java/it/grid/storm/namespace/NamespaceDirector.java deleted file mode 100644 index 85d063703..000000000 --- a/src/main/java/it/grid/storm/namespace/NamespaceDirector.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace; - -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.config.NamespaceLoader; -import it.grid.storm.namespace.config.NamespaceParser; -import it.grid.storm.namespace.config.xml.XMLNamespaceLoader; -import it.grid.storm.namespace.config.xml.XMLNamespaceParser; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class NamespaceDirector { - - private static final Logger log = LoggerFactory - .getLogger(NamespaceDirector.class);; - private static NamespaceInterface namespaceIstance = null; - - private static NamespaceLoader loader; - private static NamespaceParser parser; - - private static boolean initialized = false; - - private NamespaceDirector() {} - - public static void initializeDirector() { - - log.info("NAMESPACE : Initializing ..."); - Configuration config = Configuration.getInstance(); - - log.info(" +++++++++++++++++++++++ "); - log.info(" Production Mode "); - log.info(" +++++++++++++++++++++++ "); - - String configurationPATH = config.namespaceConfigPath(); - String namespaceConfigFileName = config.getNamespaceConfigFilename(); - int refreshInSeconds = config.getNamespaceConfigRefreshRateInSeconds(); - loader = new XMLNamespaceLoader(configurationPATH, namespaceConfigFileName, refreshInSeconds); - - // Check the validity of namespace. 
- if (loader instanceof XMLNamespaceLoader) { - XMLNamespaceLoader xmlLoader = (XMLNamespaceLoader) loader; - if (!(xmlLoader.schemaValidity)) { - // Error into the validity ckeck of namespace - log.error("Namespace configuration is not conformant with namespae grammar."); - log.error("Please validate namespace configuration file."); - System.exit(0); - } - } - - log.debug("Namespace Configuration PATH : {}" , configurationPATH); - log.debug("Namespace Configuration FILENAME : {}" , namespaceConfigFileName); - log.debug("Namespace Configuration GLANCE RATE : {}" , refreshInSeconds); - - parser = new XMLNamespaceParser(loader); - namespaceIstance = new Namespace(parser); - - log.debug("NAMESPACE INITIALIZATION : ... done!"); - initialized = true; - - } - - /** - * - * @return Namespace - */ - public static NamespaceInterface getNamespace() { - - if (!(initialized)) { - initializeDirector(); - } - return namespaceIstance; - } - - /** - * - * @return Namespace - */ - public static NamespaceParser getNamespaceParser() { - - if (!(initialized)) { - initializeDirector(); - } - return parser; - } - - /** - * - * @return Namespace - */ - public static NamespaceLoader getNamespaceLoader() { - - if (!(initialized)) { - initializeDirector(); - } - return loader; - } - - public static Logger getLogger() { - - return log; - } - -} diff --git a/src/main/java/it/grid/storm/namespace/NamespaceInterface.java b/src/main/java/it/grid/storm/namespace/NamespaceInterface.java deleted file mode 100644 index fded9e918..000000000 --- a/src/main/java/it/grid/storm/namespace/NamespaceInterface.java +++ /dev/null @@ -1,255 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace; - -import java.util.List; -import java.util.Map; - -import it.grid.storm.common.types.PFN; -import it.grid.storm.filesystem.Space; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; - -public interface NamespaceInterface { - - /** - * getAllDefinedVFS - * - * @return List : Return a List of VirtualFS containing all the instances - * defined within Namespace - * @throws NamespaceException - */ - public List getAllDefinedVFS(); - - /** - * getAllDefinedVFSAsDictionary - * - * @return Map : Return a Map of all VirtualFS defined within - * Namespace, indexed by their root-paths - * @throws NamespaceException - */ - public Map getAllDefinedVFSAsDictionary(); - - /** - * getVFSWithQuotaEnabled - * - * @return Collection: Return a collection of VirtualFS with fs type GPFS and - * quota enabled - * @throws NamespaceException - */ - public List getVFSWithQuotaEnabled(); - - /** - * getAllDefinedMappingRules - * - * @return List : Return a List of mapping rules containing all the instances defined - * within Namespace - * @throws NamespaceException - */ - public List getAllDefinedMappingRules(); - - /** - * - * - * - * @param user GridUserInterface : Represents the principal - * @return List : Return a List of VirtualFS instances - * @throws NamespaceException : Occur when - */ - public List getApproachableVFS(GridUserInterface user) - throws NamespaceException; - - /** - * - * @return List : Return a List of readable and writable by anonymous users VirtualFS instances - * @throws NamespaceException - */ - public List getApproachableByAnonymousVFS() throws NamespaceException; - - /** - * - * @return List : Return a List of readable by anonymous users VirtualFS instances - 
* @throws NamespaceException - */ - public List getReadableByAnonymousVFS() throws NamespaceException; - - /** - * - * @return List : Return a List of readable or writable by anonymous users VirtualFS instances - * @throws NamespaceException - */ - public List getReadableOrApproachableByAnonymousVFS() - throws NamespaceException; - - /** - * - * @param user GridUserInterface - * @return VirtualFS - * @throws NamespaceException - */ - public VirtualFS getDefaultVFS(GridUserInterface user) throws NamespaceException; - - /** - * - * @param storageResource StoRI - * @param gridUser GridUserInterface - * @return boolean - * @throws NamespaceException - */ - public boolean isApproachable(StoRI storageResource, GridUserInterface gridUser) - throws NamespaceException; - - /** - * - * @param surl TSURL - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - * @throws UnapprochableSurlException - * @throws InvalidSURLException - */ - public StoRI resolveStoRIbySURL(TSURL surl, GridUserInterface user) - throws UnapprochableSurlException, NamespaceException, InvalidSURLException; - - /** - * - * @param surl TSURL - * @return StoRI - * @throws IllegalArgumentException - * @throws NamespaceException - * @throws InvalidSURLException - */ - public StoRI resolveStoRIbySURL(TSURL surl) - throws UnapprochableSurlException, NamespaceException, InvalidSURLException; - - /** - * - * @param absolutePath String - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyAbsolutePath(String absolutePath, GridUserInterface user) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @param vfs VirtualFS - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyAbsolutePath(String absolutePath, VirtualFS vfs) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @return StoRI - * @throws NamespaceException - */ - public StoRI 
resolveStoRIbyAbsolutePath(String absolutePath) throws NamespaceException; - - /** - * - * @param absolutePath String - * @param user GridUserInterface - * @return VirtualFS - * @throws NamespaceException - */ - public VirtualFS resolveVFSbyAbsolutePath(String absolutePath, GridUserInterface user) - throws NamespaceException; - - /** - * - * @param absolutePath String - * @return VirtualFS - * @throws NamespaceException - */ - public VirtualFS resolveVFSbyAbsolutePath(String absolutePath) throws NamespaceException; - - /** - * - * @param pfn PFN - * @return StoRI - * @throws NamespaceException - */ - public StoRI resolveStoRIbyPFN(PFN pfn) throws NamespaceException; - - /** - * - * @param file LocalFile - * @return VirtualFS - * @throws NamespaceException - */ - public VirtualFS resolveVFSbyLocalFile(it.grid.storm.filesystem.LocalFile file) - throws NamespaceException; - - /** - * - * @param pfn PFN - * @return VirtualFS - * @throws NamespaceException - */ - public VirtualFS resolveVFSbyPFN(PFN pfn) throws NamespaceException; - - /** - * - * @param user GridUserInterface - * @return StoRI - * @throws NamespaceException - */ - public StoRI getDefaultSpaceFileForUser(GridUserInterface user) throws NamespaceException; - - /** - * Method that retrieves a previously reserved Space as identified by the SpaceToken, for the - * given new size. If null or Empty TSizeInBytes are supplied, a Space object built off deafult - * values is returned instead. 
- * - * - * @param totSize TSizeInBytes - * @param token TSpaceToken - * @return Space - */ - - public Space retrieveSpaceByToken(TSizeInBytes totSize, TSpaceToken token); - - /** - * - * @param user GridUserInterface - * @return String - * @throws NamespaceException - */ - public String makeSpaceFileURI(GridUserInterface user) throws NamespaceException; - - /** - * @param fileName - * @return - * @throws IllegalArgumentException - */ - public boolean isSpaceFile(String fileName); - - public String getNamespaceVersion() throws NamespaceException; - - /** - * @param absolutePath - * @return - * @throws NamespaceException - */ - public VirtualFS resolveVFSbyRoot(String absolutePath) throws NamespaceException; - - /** - * @param spaceToken - * @return - * @throws NamespaceException - */ - public VirtualFS resolveVFSbySpaceToken(TSpaceToken spaceToken) - throws NamespaceException; - -} diff --git a/src/main/java/it/grid/storm/namespace/StoRIImpl.java b/src/main/java/it/grid/storm/namespace/StoRIImpl.java index 472b30fc1..1981fd7c7 100644 --- a/src/main/java/it/grid/storm/namespace/StoRIImpl.java +++ b/src/main/java/it/grid/storm/namespace/StoRIImpl.java @@ -14,6 +14,7 @@ import java.util.List; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import it.grid.storm.balancer.BalancingStrategy; import it.grid.storm.balancer.Node; @@ -24,7 +25,7 @@ import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.StFN; import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.filesystem.FilesystemIF; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.filesystem.ReservationException; @@ -51,649 +52,623 @@ public class StoRIImpl implements StoRI { - private Logger log = NamespaceDirector.getLogger(); - - private TSURL surl; - private PFN pfn; - private ACLMode aclMode = ACLMode.UNDEF; - private TLifeTimeInSeconds lifetime = null; - private Date 
startTime = null; - private LocalFile localFile = null; - private Space space; - - private VirtualFS vfs; - private FilesystemIF fs; - private SpaceSystem spaceDriver; - private StoRIType type; - private Capability capability; - - // Elements of Name of StoRI - private String stfn; - private String vfsRoot; - private String relativeStFN; - private String relativePath; - private String fileName; - private String stfnPath; - private String stfnRoot; - - private MappingRule winnerRule; - - // Boolean status for full detailed metadata - private boolean volatileInformationAreSet = false; - - public StoRIImpl(VirtualFS vfs, MappingRule winnerRule, String relativeStFN, StoRIType type) { - - if (vfs != null) { - this.vfs = vfs; - capability = (Capability) vfs.getCapabilities(); - } else { - log.error("StoRI built without VFS!"); - } - - if (winnerRule != null) { - stfnRoot = winnerRule.getStFNRoot(); - stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; - - vfsRoot = vfs.getRootPath(); - - this.relativeStFN = relativeStFN; - - stfnPath = NamespaceUtil.getStFNPath(stfn); - - relativePath = NamespaceUtil.consumeFileName(relativeStFN); - - if (relativePath != null) { - if (relativePath.startsWith(NamingConst.SEPARATOR)) { - relativePath = relativePath.substring(1); - } - } else { - relativePath = "/"; - } - - fileName = NamespaceUtil.getFileName(relativeStFN); - log.debug("StFN Filename : {} [StFN = '{}']", fileName, - relativeStFN); - - if (type == null) { - if (relativeStFN.endsWith(NamingConst.SEPARATOR)) { - type = StoRIType.FOLDER; - } else { - type = StoRIType.UNKNOWN; - } - } else { - this.type = type; - } - - } else { - log.warn("StoRI built without mapping rule"); - } - } - - public StoRIImpl(VirtualFS vfs, String stfnStr, TLifeTimeInSeconds lifetime, StoRIType type) { - - this.vfs = vfs; - this.capability = (Capability) vfs.getCapabilities(); - // Relative path has to be a path in a relative form! 
(without "/" at - // begins) - if (relativePath != null) { - if (relativePath.startsWith(NamingConst.SEPARATOR)) { - this.relativePath = relativePath.substring(1); - } - } else { - this.relativePath = "/"; - } - - this.lifetime = lifetime; - - if (type == null) { - this.type = StoRIType.UNKNOWN; - } else { - this.type = type; - } - - this.stfnRoot = null; - - this.fileName = NamespaceUtil.getFileName(stfnStr); - log.debug("StFN Filename : {} [StFN = '{}']", fileName, - stfnStr); - - this.stfnPath = NamespaceUtil.getStFNPath(stfnStr); - log.debug("StFN StFNPath : {} [StFN = '{}']", stfnPath, stfnStr); - - } - - public void allotSpaceByToken(TSpaceToken token) throws ReservationException, - ExpiredSpaceTokenException { - - // Retrieve SpaceSystem Driver - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.useAllSpaceForFile(token, this); - } catch (NamespaceException e) { - log.error("Error using space token {} for file {}: {}", - token, fileName, e.getMessage(),e); - throw new ReservationException(e.getMessage(), e); - } - - } - - public void allotSpaceByToken(TSpaceToken token, TSizeInBytes totSize) - throws ReservationException, ExpiredSpaceTokenException { - - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error(e.getMessage(),e); - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.useSpaceForFile(token, this, totSize); - } catch (NamespaceException e) { - log.error("Error using space token {} for file {}: {}", - token, fileName, e.getMessage(),e); - throw new ReservationException(e.getMessage(), e); - } - - } - - public void allotSpaceForFile(TSizeInBytes totSize) - throws 
ReservationException { - - if (spaceDriver == null) { - try { - this.spaceDriver = vfs.getSpaceSystemDriverInstance(); - } catch (NamespaceException e) { - log.error("Error while retrieving Space System Driver for VFS {}", - e.getMessage(), e); - - throw new ReservationException( - "Error while retrieving Space System Driver for VFS", e); - } - } - - try { - vfs.makeSilhouetteForFile(this, totSize); - } catch (NamespaceException e) { - log.error(e.getMessage(),e); - throw new ReservationException( - "Error while constructing 'Space Silhouette' for " + this.fileName, e); - } - - log.debug("Space built. Space " + this.getSpace().getSpaceFile().getPath()); - this.getSpace().allot(); - } - - public String getAbsolutePath() { - return vfs.getRootPath() + NamingConst.SEPARATOR + relativeStFN; - } - - public TLifeTimeInSeconds getFileLifeTime() { - if (!(volatileInformationAreSet)) { - setVolatileInformation(); - } - return lifetime; - } - - public String getFilename() { - - return this.fileName; - } - - public Date getFileStartTime() { - - if (!(volatileInformationAreSet)) { - setVolatileInformation(); - } - return startTime; - } - - public ArrayList getChildren(TDirOption dirOption) - throws InvalidDescendantsEmptyRequestException, - InvalidDescendantsPathRequestException, - InvalidDescendantsFileRequestException { - - ArrayList stoRIList = new ArrayList(); - File fileHandle = new File(getAbsolutePath()); - - if (!fileHandle.isDirectory()) { - if (fileHandle.isFile()) { - log.error("SURL represents a File, not a Directory!"); - throw new InvalidDescendantsFileRequestException(fileHandle); - } else { - log.warn("SURL does not exists!"); - throw new InvalidDescendantsPathRequestException(fileHandle); - } - } else { // SURL point to an existent directory. 
- // Create ArrayList containing all Valid fileName path found in - // PFN of StoRI's SURL - PathCreator pCreator = new PathCreator(fileHandle, - dirOption.isAllLevelRecursive(), 1); - Collection pathList = pCreator.generateChildren(); - if (pathList.size() == 0) { - log.debug("SURL point to an EMPTY DIRECTORY"); - throw new InvalidDescendantsEmptyRequestException(fileHandle, pathList); - } else { // Creation of StoRI LIST - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - for (String childPath : pathList) { - log.debug(":Creation of new StoRI with path: {}", - childPath); - try { - - StoRI childStorI = namespace.resolveStoRIbyAbsolutePath(childPath, vfs); - childStorI.setMappingRule(getMappingRule()); - - stoRIList.add(childStorI); - } catch (NamespaceException ex) { - log.error("Error occurred while resolving StoRI by absolute path", - ex); - } - } - } - } - return stoRIList; - } - - public LocalFile getLocalFile() { - - if (localFile == null) { - try { - fs = vfs.getFilesystem(); - } catch (NamespaceException ex) { - log.error("Error while retrieving FS driver ", ex); - } - localFile = new LocalFile(getAbsolutePath(), fs); - } - return localFile; - } - - public MappingRule getMappingRule() { - return this.winnerRule; - } - - public List getParents() { - - StoRI createdStoRI = null; - ArrayList parentList = new ArrayList(); - String consumeElements = this.relativePath; - String consumed; - boolean lastElements = false; - - do { - createdStoRI = new StoRIImpl(this.vfs, this.winnerRule, consumeElements, - StoRIType.FOLDER); - parentList.add(createdStoRI); - consumed = NamespaceUtil.consumeElement(consumeElements); - if (consumed.equals(consumeElements)) { - lastElements = true; - } else { - consumeElements = consumed; - } - } while ((!lastElements)); - - return parentList; - } - - public PFN getPFN() { - - if (pfn == null) { - try { - this.pfn = PFN.make(getAbsolutePath()); - } catch (InvalidPFNAttributeException e) { - 
log.error(e.getMessage(),e); - } - } - return this.pfn; - } - - public String getRelativePath() { - - return this.relativePath; - } - - public String getRelativeStFN() { - - return this.relativeStFN; - } - - public Space getSpace() { - - if (space == null) { - log.error("No space bound with this StoRI!"); - return null; - } - return this.space; - } - - public StFN getStFN() { - - StFN stfn = null; - if (this.surl == null) { - getSURL(); - } - stfn = surl.sfn().stfn(); - return stfn; - } - - public String getStFNPath() { - - return this.stfnPath; - } - - public String getStFNRoot() { - - return this.stfnRoot; - } - - public StoRIType getStoRIType() { - - return this.type; - } - - public TSURL getSURL() { - - /** - * The String passed to TSURL.makeFromString MUST contains a valid TSURL in - * string format, not only relativePath. - */ - if (this.surl == null) { - try { - this.surl = TSURL.makeFromStringValidate(buildSURLString()); - } catch (Throwable e) { - log.error("Unable to build the SURL with relative path: {}. 
{}", - relativePath, e.getMessage(), e); - } - } - return surl; - } - - public TTURL getTURL(TURLPrefix desiredProtocols) - throws IllegalArgumentException, InvalidGetTURLProtocolException, - TURLBuildingException { - - TTURL resultTURL = null; - - if (desiredProtocols == null || desiredProtocols.size() == 0) { - log - .error(" request with NULL or empty prefixOfAcceptedTransferProtocol!"); - throw new IllegalArgumentException( - "unable to build the TTURL, invalid arguments: desiredProtocols=" - + desiredProtocols); - } else { - - // Within the request there are some protocol preferences - // Calculate the intersection between Desired Protocols and Available - // Protocols - List desiredP = new ArrayList<>(desiredProtocols.getDesiredProtocols()); - List availableP = new ArrayList<>(capability.getAllManagedProtocols()); - desiredP.retainAll(availableP); - - if (desiredP.isEmpty()) { - String msg = String.format("None of [%s] protocols matches the available " - + "protocols [%s]", join(desiredP, ','), join(availableP, ',')); - log.error(msg); - throw new InvalidGetTURLProtocolException(msg); - - } else { - - log.debug("Protocol matching.. Intersection size: {}", - desiredP.size()); - - Protocol choosen = null; - Authority authority = null; - int index = 0; - boolean turlBuilt = false; - while (!turlBuilt && index < desiredP.size()) { - choosen = desiredP.get(index); - authority = null; - log.debug("Selected Protocol: {}", choosen); - if (capability.isPooledProtocol(choosen)) { - log.debug("The protocol selected is in POOL Configuration"); - try { - authority = getPooledAuthority(choosen); - } catch (BalancingStrategyException e) { - log - .warn("Unable to get the pool member to be used to build the turl. 
BalancerException : {}", - e.getMessage()); - index++; - continue; - } - } else { - log.debug("The protocol selected is in NON-POOL Configuration"); - TransportProtocol transProt = null; - List protList = capability - .getManagedProtocolByScheme(choosen); - if (protList.size() > 1) { // Strange case - log - .warn("More than one protocol {}" - + " defined but NOT in POOL Configuration. Taking the first one.", - choosen); - } - transProt = protList.get(0); - authority = transProt.getAuthority(); - } - - if (choosen.equals(Protocol.HTTP) || choosen.equals(Protocol.HTTPS)){ - resultTURL = buildHTTPTURL(choosen,authority); - } else { - resultTURL = buildTURL(choosen, authority); - } - - turlBuilt = true; - } - if (!turlBuilt) { - throw new TURLBuildingException( - "Unable to build the turl given protocols " + desiredP.toString()); - } - } - } - return resultTURL; - } - - public VirtualFS getVirtualFileSystem() { - return this.vfs; - } - - public boolean hasJustInTimeACLs() { - - boolean result = true; - - if (aclMode.equals(ACLMode.UNDEF)) { - this.aclMode = vfs.getCapabilities().getACLMode(); - } - if (aclMode.equals(ACLMode.JUST_IN_TIME)) { - result = true; - } else { - result = false; - } - - return result; - } - - - public void setMappingRule(MappingRule winnerRule) { - this.winnerRule = winnerRule; - } - - public void setSpace(Space space) { - this.space = space; - } - - public void setStFNRoot(String stfnRoot) { - - this.stfnRoot = stfnRoot; - } - - public void setStoRIType(StoRIType type) { - - this.type = type; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append("\n"); - sb.append(" stori.stfn : " + this.getStFN().toString() + "\n"); - sb.append(" stori.vfs-root :" + this.vfsRoot + "\n"); - sb.append(" stori.absolutePath : " + this.getAbsolutePath() + "\n"); - sb.append(" stori.vfs NAME : " + this.getVFSName() + "\n"); - sb.append(" stori.stfn FileName : " + this.fileName + "\n"); - sb.append(" stori.stfn StFN 
path : " + this.stfnPath + "\n"); - sb.append(" stori.stfn rel. Path : " + this.relativePath + "\n"); - sb.append(" stori.relative StFN : " + this.relativeStFN + "\n"); - sb.append(" stori.stfn-root : " + this.stfnRoot + "\n"); - sb.append(" story.type : " + this.type + "\n"); - sb.append(" stori.SURL : " + this.getSURL() + "\n"); - sb.append(" stori.localFile : " + this.getLocalFile() + "\n"); - sb.append(" stori.mappingRule : " + this.getMappingRule() + "\n"); - - return sb.toString(); - } - - private String buildSURLString() throws NamespaceException { - String stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; - SURL surl = new SURL(stfn); - return surl.toString(); - } - - private TTURL buildHTTPTURL(Protocol p, Authority authority){ - - String prefix = Configuration.getInstance().getHTTPTURLPrefix(); - StringBuilder sb = new StringBuilder(); - sb.append(p.getProtocolPrefix()); - sb.append(authority); - - if ( prefix != null){ + private static Logger log = LoggerFactory.getLogger(StoRIImpl.class); + + private TSURL surl; + private PFN pfn; + private ACLMode aclMode = ACLMode.UNDEF; + private TLifeTimeInSeconds lifetime = null; + private Date startTime = null; + private LocalFile localFile = null; + private Space space; + + private VirtualFS vfs; + private FilesystemIF fs; + private SpaceSystem spaceDriver; + private StoRIType type; + private Capability capability; + + // Elements of Name of StoRI + private String stfn; + private String vfsRoot; + private String relativeStFN; + private String relativePath; + private String fileName; + private String stfnPath; + private String stfnRoot; + + private MappingRule winnerRule; + + // Boolean status for full detailed metadata + private boolean volatileInformationAreSet = false; + + public StoRIImpl(VirtualFS vfs, MappingRule winnerRule, String relativeStFN, StoRIType type) { + + if (vfs != null) { + this.vfs = vfs; + capability = (Capability) vfs.getCapabilities(); + } else { + log.error("StoRI built without 
VFS!"); + } + + if (winnerRule != null) { + stfnRoot = winnerRule.getStFNRoot(); + stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; + + vfsRoot = vfs.getRootPath(); + + this.relativeStFN = relativeStFN; + + stfnPath = NamespaceUtil.getStFNPath(stfn); + + relativePath = NamespaceUtil.consumeFileName(relativeStFN); + + if (relativePath != null) { + if (relativePath.startsWith(NamingConst.SEPARATOR)) { + relativePath = relativePath.substring(1); + } + } else { + relativePath = "/"; + } + + fileName = NamespaceUtil.getFileName(relativeStFN); + log.debug("StFN Filename : {} [StFN = '{}']", fileName, relativeStFN); + + if (type == null) { + if (relativeStFN.endsWith(NamingConst.SEPARATOR)) { + type = StoRIType.FOLDER; + } else { + type = StoRIType.UNKNOWN; + } + } else { + this.type = type; + } + + } else { + log.warn("StoRI built without mapping rule"); + } + } + + public StoRIImpl(VirtualFS vfs, String stfnStr, TLifeTimeInSeconds lifetime, StoRIType type) { + + this.vfs = vfs; + this.capability = (Capability) vfs.getCapabilities(); + // Relative path has to be a path in a relative form! 
(without "/" at + // begins) + if (relativePath != null) { + if (relativePath.startsWith(NamingConst.SEPARATOR)) { + this.relativePath = relativePath.substring(1); + } + } else { + this.relativePath = "/"; + } + + this.lifetime = lifetime; + + if (type == null) { + this.type = StoRIType.UNKNOWN; + } else { + this.type = type; + } + + this.stfnRoot = null; + + this.fileName = NamespaceUtil.getFileName(stfnStr); + log.debug("StFN Filename : {} [StFN = '{}']", fileName, stfnStr); + + this.stfnPath = NamespaceUtil.getStFNPath(stfnStr); + log.debug("StFN StFNPath : {} [StFN = '{}']", stfnPath, stfnStr); + + } + + public void allotSpaceByToken(TSpaceToken token) + throws ReservationException, ExpiredSpaceTokenException { + + // Retrieve SpaceSystem Driver + if (spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } + + try { + vfs.useAllSpaceForFile(token, this); + } catch (NamespaceException e) { + log.error("Error using space token {} for file {}: {}", token, fileName, e.getMessage(), e); + throw new ReservationException(e.getMessage(), e); + } + + } + + public void allotSpaceByToken(TSpaceToken token, TSizeInBytes totSize) + throws ReservationException, ExpiredSpaceTokenException { + + if (spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } + + try { + vfs.useSpaceForFile(token, this, totSize); + } catch (NamespaceException e) { + log.error("Error using space token {} for file {}: {}", token, fileName, e.getMessage(), e); + throw new ReservationException(e.getMessage(), e); + } + + } + + public void allotSpaceForFile(TSizeInBytes totSize) throws ReservationException { + + if 
(spaceDriver == null) { + try { + this.spaceDriver = vfs.getSpaceSystemDriverInstance(); + } catch (NamespaceException e) { + log.error("Error while retrieving Space System Driver for VFS {}", e.getMessage(), e); + + throw new ReservationException("Error while retrieving Space System Driver for VFS", e); + } + } + + try { + vfs.makeSilhouetteForFile(this, totSize); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + throw new ReservationException( + "Error while constructing 'Space Silhouette' for " + this.fileName, e); + } + + log.debug("Space built. Space " + this.getSpace().getSpaceFile().getPath()); + this.getSpace().allot(); + } + + public String getAbsolutePath() { + return vfs.getRootPath() + NamingConst.SEPARATOR + relativeStFN; + } + + public TLifeTimeInSeconds getFileLifeTime() { + if (!(volatileInformationAreSet)) { + setVolatileInformation(); + } + return lifetime; + } + + public String getFilename() { + + return this.fileName; + } + + public Date getFileStartTime() { + + if (!(volatileInformationAreSet)) { + setVolatileInformation(); + } + return startTime; + } + + public ArrayList getChildren(TDirOption dirOption) + throws InvalidDescendantsEmptyRequestException, InvalidDescendantsPathRequestException, + InvalidDescendantsFileRequestException { + + ArrayList stoRIList = new ArrayList(); + File fileHandle = new File(getAbsolutePath()); + + if (!fileHandle.isDirectory()) { + if (fileHandle.isFile()) { + log.error("SURL represents a File, not a Directory!"); + throw new InvalidDescendantsFileRequestException(fileHandle); + } else { + log.warn("SURL does not exists!"); + throw new InvalidDescendantsPathRequestException(fileHandle); + } + } else { // SURL point to an existent directory. 
+ // Create ArrayList containing all Valid fileName path found in + // PFN of StoRI's SURL + PathCreator pCreator = new PathCreator(fileHandle, dirOption.isAllLevelRecursive(), 1); + Collection pathList = pCreator.generateChildren(); + if (pathList.size() == 0) { + log.debug("SURL point to an EMPTY DIRECTORY"); + throw new InvalidDescendantsEmptyRequestException(fileHandle, pathList); + } else { // Creation of StoRI LIST + Namespace namespace = Namespace.getInstance(); + for (String childPath : pathList) { + log.debug(":Creation of new StoRI with path: {}", childPath); + try { + + StoRI childStorI = namespace.resolveStoRIbyAbsolutePath(childPath, vfs); + childStorI.setMappingRule(getMappingRule()); + + stoRIList.add(childStorI); + } catch (NamespaceException ex) { + log.error("Error occurred while resolving StoRI by absolute path", ex); + } + } + } + } + return stoRIList; + } + + public LocalFile getLocalFile() { + + if (localFile == null) { + try { + fs = vfs.getFilesystem(); + } catch (NamespaceException ex) { + log.error("Error while retrieving FS driver ", ex); + } + localFile = new LocalFile(getAbsolutePath(), fs); + } + return localFile; + } + + public MappingRule getMappingRule() { + return this.winnerRule; + } + + public List getParents() { + + StoRI createdStoRI = null; + ArrayList parentList = new ArrayList(); + String consumeElements = this.relativePath; + String consumed; + boolean lastElements = false; + + do { + createdStoRI = new StoRIImpl(this.vfs, this.winnerRule, consumeElements, StoRIType.FOLDER); + parentList.add(createdStoRI); + consumed = NamespaceUtil.consumeElement(consumeElements); + if (consumed.equals(consumeElements)) { + lastElements = true; + } else { + consumeElements = consumed; + } + } while ((!lastElements)); + + return parentList; + } + + public PFN getPFN() { + + if (pfn == null) { + try { + this.pfn = PFN.make(getAbsolutePath()); + } catch (InvalidPFNAttributeException e) { + log.error(e.getMessage(), e); + } + } + return 
this.pfn; + } + + public String getRelativePath() { + + return this.relativePath; + } + + public String getRelativeStFN() { + + return this.relativeStFN; + } + + public Space getSpace() { + + if (space == null) { + log.error("No space bound with this StoRI!"); + return null; + } + return this.space; + } + + public StFN getStFN() { + + StFN stfn = null; + if (this.surl == null) { + getSURL(); + } + stfn = surl.sfn().stfn(); + return stfn; + } + + public String getStFNPath() { + + return this.stfnPath; + } + + public String getStFNRoot() { + + return this.stfnRoot; + } + + public StoRIType getStoRIType() { + + return this.type; + } + + public TSURL getSURL() { + + /** + * The String passed to TSURL.makeFromString MUST contains a valid TSURL in string format, not + * only relativePath. + */ + if (this.surl == null) { + try { + this.surl = TSURL.makeFromStringValidate(buildSURLString()); + } catch (Throwable e) { + log.error("Unable to build the SURL with relative path: {}. {}", relativePath, + e.getMessage(), e); + } + } + return surl; + } + + public TTURL getTURL(TURLPrefix desiredProtocols) + throws IllegalArgumentException, InvalidGetTURLProtocolException, TURLBuildingException { + + TTURL resultTURL = null; + + if (desiredProtocols == null || desiredProtocols.size() == 0) { + log.error(" request with NULL or empty prefixOfAcceptedTransferProtocol!"); + throw new IllegalArgumentException( + "unable to build the TTURL, invalid arguments: desiredProtocols=" + desiredProtocols); + } else { + + // Within the request there are some protocol preferences + // Calculate the intersection between Desired Protocols and Available + // Protocols + List desiredP = new ArrayList<>(desiredProtocols.getDesiredProtocols()); + List availableP = new ArrayList<>(capability.getAllManagedProtocols()); + desiredP.retainAll(availableP); + + if (desiredP.isEmpty()) { + String msg = + String.format("None of [%s] protocols matches the available protocols [%s]", + 
join(desiredProtocols.getDesiredProtocols(), ','), join(availableP, ',')); + log.debug(msg); + throw new InvalidGetTURLProtocolException(msg); + + } else { + + log.debug("Protocol matching.. Intersection size: {}", desiredP.size()); + + Protocol choosen = null; + Authority authority = null; + int index = 0; + boolean turlBuilt = false; + while (!turlBuilt && index < desiredP.size()) { + choosen = desiredP.get(index); + authority = null; + log.debug("Selected Protocol: {}", choosen); + if (capability.isPooledProtocol(choosen)) { + log.debug("The protocol selected is in POOL Configuration"); + try { + authority = getPooledAuthority(choosen); + } catch (BalancingStrategyException e) { + log.warn( + "Unable to get the pool member to be used to build the turl. BalancerException : {}", + e.getMessage()); + index++; + continue; + } + } else { + log.debug("The protocol selected is in NON-POOL Configuration"); + TransportProtocol transProt = null; + List protList = capability.getManagedProtocolByScheme(choosen); + if (protList.size() > 1) { // Strange case + log.warn("More than one protocol {}" + + " defined but NOT in POOL Configuration. 
Taking the first one.", choosen); + } + transProt = protList.get(0); + authority = transProt.getAuthority(); + } + + if (choosen.equals(Protocol.HTTP) || choosen.equals(Protocol.HTTPS)) { + resultTURL = buildHTTPTURL(choosen, authority); + } else { + resultTURL = buildTURL(choosen, authority); + } + + turlBuilt = true; + } + if (!turlBuilt) { + throw new TURLBuildingException( + "Unable to build the turl given protocols " + desiredP.toString()); + } + } + } + return resultTURL; + } + + public VirtualFS getVirtualFileSystem() { + return this.vfs; + } + + public boolean hasJustInTimeACLs() { + + boolean result = true; + + if (aclMode.equals(ACLMode.UNDEF)) { + this.aclMode = vfs.getCapabilities().getACLMode(); + } + if (aclMode.equals(ACLMode.JUST_IN_TIME)) { + result = true; + } else { + result = false; + } + + return result; + } + + + public void setMappingRule(MappingRule winnerRule) { + this.winnerRule = winnerRule; + } + + public void setSpace(Space space) { + this.space = space; + } + + public void setStFNRoot(String stfnRoot) { + + this.stfnRoot = stfnRoot; + } + + public void setStoRIType(StoRIType type) { + + this.type = type; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append("\n"); + sb.append(" stori.stfn : " + this.getStFN().toString() + "\n"); + sb.append(" stori.vfs-root :" + this.vfsRoot + "\n"); + sb.append(" stori.absolutePath : " + this.getAbsolutePath() + "\n"); + sb.append(" stori.vfs NAME : " + this.getVFSName() + "\n"); + sb.append(" stori.stfn FileName : " + this.fileName + "\n"); + sb.append(" stori.stfn StFN path : " + this.stfnPath + "\n"); + sb.append(" stori.stfn rel. 
Path : " + this.relativePath + "\n"); + sb.append(" stori.relative StFN : " + this.relativeStFN + "\n"); + sb.append(" stori.stfn-root : " + this.stfnRoot + "\n"); + sb.append(" story.type : " + this.type + "\n"); + sb.append(" stori.SURL : " + this.getSURL() + "\n"); + sb.append(" stori.localFile : " + this.getLocalFile() + "\n"); + sb.append(" stori.mappingRule : " + this.getMappingRule() + "\n"); + + return sb.toString(); + } + + private String buildSURLString() throws NamespaceException { + String stfn = stfnRoot + NamingConst.SEPARATOR + relativeStFN; + SURL surl = new SURL(stfn); + return surl.toString(); + } + + private TTURL buildHTTPTURL(Protocol p, Authority authority) { + + String prefix = StormConfiguration.getInstance().getHTTPTURLPrefix(); + StringBuilder sb = new StringBuilder(); + sb.append(p.getProtocolPrefix()); + sb.append(authority); + + if (prefix != null) { sb.append(prefix); } - sb.append(getStFN().toString()); - - log.debug("built http turl: {}", sb.toString()); - - return TTURL.makeFromString(sb.toString()); - - } - private TTURL buildTURL(Protocol protocol, Authority authority) - throws InvalidProtocolForTURLException { - - TTURL result = null; - - switch (protocol.getProtocolIndex()) { - case 0: // EMPTY Protocol - throw new InvalidProtocolForTURLException(protocol.getSchema()); - case 1: - result = TURLBuilder.buildFileTURL(authority, this.getPFN()); - break; // FILE Protocol - case 2: - result = TURLBuilder.buildGsiftpTURL(authority, this.getPFN()); - break; // GSIFTP Protocol - case 3: - result = TURLBuilder.buildRFIOTURL(authority, this.getPFN()); - break; // RFIO Protocol - case 4: // SRM Protocol - throw new InvalidProtocolForTURLException(protocol.getSchema()); - case 5: - result = TURLBuilder.buildROOTTURL(authority, this.getPFN()); - break; // ROOT Protocol - case 8: - result = TURLBuilder.buildXROOTTURL(authority, this.getPFN()); - break; // XROOT Protocol - default: - // Unknown protocol - throw new 
InvalidProtocolForTURLException(protocol.getSchema()); - } - return result; - } - - /** - * @param pooledProtocol - * @return - * @throws BalancerException - */ - private Authority getPooledAuthority(Protocol pooledProtocol) - throws BalancingStrategyException { - - Authority authority = null; - if (pooledProtocol.equals(Protocol.GSIFTP) - || pooledProtocol.equals(Protocol.HTTP) - || pooledProtocol.equals(Protocol.HTTPS)) { - BalancingStrategy bal = vfs - .getProtocolBalancingStrategy(pooledProtocol); - if (bal != null) { - Node node = bal.getNextElement(); - authority = new Authority(node.getHostname(), node.getPort()); - } - } else { - log.error("Unable to manage pool with protocol different from GSIFTP."); - } - return authority; - } - - private String getVFSName() { - - String result = "UNDEF"; - if (vfs != null) { - result = vfs.getAliasName(); - } - return result; - } - - /** - * Set "lifetime" and "startTime" information. The corresponding values are - * retrieved from the DB. - */ - private void setVolatileInformation() { - - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); - List volatileInfo = catalog.volatileInfoOn(getPFN()); - if (volatileInfo.size() != 2) { - lifetime = TLifeTimeInSeconds.makeInfinite(); - startTime = null; - return; - } - startTime = new Date(((Calendar) volatileInfo.get(0)).getTimeInMillis()); - lifetime = (TLifeTimeInSeconds) volatileInfo.get(1); - volatileInformationAreSet = true; - } + sb.append(getStFN().toString()); + + log.debug("built http turl: {}", sb.toString()); + + return TTURL.makeFromString(sb.toString()); + + } + + private TTURL buildTURL(Protocol protocol, Authority authority) + throws InvalidProtocolForTURLException { + + TTURL result = null; + + switch (protocol.getProtocolIndex()) { + case 0: // EMPTY Protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + case 1: + result = TURLBuilder.buildFileTURL(authority, this.getPFN()); + break; // FILE Protocol + case 2: + result = 
TURLBuilder.buildGsiftpTURL(authority, this.getPFN()); + break; // GSIFTP Protocol + case 3: + result = TURLBuilder.buildRFIOTURL(authority, this.getPFN()); + break; // RFIO Protocol + case 4: // SRM Protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + case 5: + result = TURLBuilder.buildROOTTURL(authority, this.getPFN()); + break; // ROOT Protocol + case 8: + result = TURLBuilder.buildXROOTTURL(authority, this.getPFN()); + break; // XROOT Protocol + default: + // Unknown protocol + throw new InvalidProtocolForTURLException(protocol.getSchema()); + } + return result; + } + + /** + * @param pooledProtocol + * @return + * @throws BalancerException + */ + private Authority getPooledAuthority(Protocol pooledProtocol) throws BalancingStrategyException { + + Authority authority = null; + if (pooledProtocol.equals(Protocol.GSIFTP) || pooledProtocol.equals(Protocol.HTTP) + || pooledProtocol.equals(Protocol.HTTPS)) { + BalancingStrategy bal = vfs.getProtocolBalancingStrategy(pooledProtocol); + if (bal != null) { + Node node = bal.getNextElement(); + authority = new Authority(node.getHostname(), node.getPort()); + } + } else { + log.error("Unable to manage pool with protocol different from GSIFTP."); + } + return authority; + } + + private String getVFSName() { + + String result = "UNDEF"; + if (vfs != null) { + result = vfs.getAliasName(); + } + return result; + } + + /** + * Set "lifetime" and "startTime" information. The corresponding values are retrieved from the DB. 
+ */ + private void setVolatileInformation() { + + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + List volatileInfo = catalog.volatileInfoOn(getPFN()); + if (volatileInfo.size() != 2) { + lifetime = TLifeTimeInSeconds.makeInfinite(); + startTime = null; + return; + } + startTime = new Date(((Calendar) volatileInfo.get(0)).getTimeInMillis()); + lifetime = (TLifeTimeInSeconds) volatileInfo.get(1); + volatileInformationAreSet = true; + } @Override - public StFN getStFNFromMappingRule() { + public StFN getStFNFromMappingRule() { try { - if (getMappingRule() == null){ - log.warn("Mapping rule is null for this StorI. " + - "Falling back to VFS StFN."); + if (getMappingRule() == null) { + log.warn("Mapping rule is null for this StorI. " + "Falling back to VFS StFN."); return getStFN(); } - + String mappingRuleRoot = getMappingRule().getStFNRoot(); - String mappedStfn = mappingRuleRoot + NamingConst.SEPARATOR - + relativeStFN; - + String mappedStfn = mappingRuleRoot + NamingConst.SEPARATOR + relativeStFN; + return StFN.make(mappedStfn); } catch (InvalidStFNAttributeException e) { - - log.error("Error building StFN from mapping rule. Reason: {}", - e.getMessage(),e); - + + log.error("Error building StFN from mapping rule. 
Reason: {}", e.getMessage(), e); + log.error("Falling back to VFS StFN."); - + return getStFN(); } diff --git a/src/main/java/it/grid/storm/namespace/TURLBuilder.java b/src/main/java/it/grid/storm/namespace/TURLBuilder.java index 7ea481474..d70a181ad 100644 --- a/src/main/java/it/grid/storm/namespace/TURLBuilder.java +++ b/src/main/java/it/grid/storm/namespace/TURLBuilder.java @@ -5,71 +5,67 @@ package it.grid.storm.namespace; import it.grid.storm.common.types.PFN; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.namespace.model.Authority; import it.grid.storm.namespace.model.Protocol; import it.grid.storm.srm.types.InvalidTTURLAttributesException; import it.grid.storm.srm.types.TTURL; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TURLBuilder { - private static Logger log = NamespaceDirector.getLogger(); + private static Logger LOG = LoggerFactory.getLogger(TURLBuilder.class); - public TURLBuilder() { + public TURLBuilder() { - super(); - } + super(); + } - private static TTURL buildTURL(Protocol protocol, Authority authority, - String extraSlashes, PFN physicalFN) { + private static TTURL buildTURL(Protocol protocol, Authority authority, String extraSlashes, + PFN physicalFN) { - TTURL turl = null; - String turlString = null; - try { - turlString = protocol.getProtocolPrefix() + authority.toString() - + extraSlashes + physicalFN.getValue(); - log.debug("turlString used to build the TURL : {}", turlString); - turl = TTURL.makeFromString(turlString); - } catch (InvalidTTURLAttributesException ex) { - log.error("Error while constructing TURL with Authority '{}': {}", - authority, ex.getMessage(), ex); - } - return turl; - } + TTURL turl = null; + String turlString = null; + try { + turlString = protocol.getProtocolPrefix() + authority.toString() + extraSlashes + + physicalFN.getValue(); + LOG.debug("turlString used to build the TURL : {}", turlString); + turl = 
TTURL.makeFromString(turlString); + } catch (InvalidTTURLAttributesException ex) { + LOG.error("Error while constructing TURL with Authority '{}': {}", authority, ex.getMessage(), + ex); + } + return turl; + } - public static TTURL buildFileTURL(Authority authority, PFN physicalFN) { + public static TTURL buildFileTURL(Authority authority, PFN physicalFN) { - String extraSlashesForFile = Configuration.getInstance() - .getExtraSlashesForFileTURL(); - return buildTURL(Protocol.FILE, authority, extraSlashesForFile, physicalFN); - } + String extraSlashesForFile = StormConfiguration.getInstance().getExtraSlashesForFileTURL(); + return buildTURL(Protocol.FILE, authority, extraSlashesForFile, physicalFN); + } - public static TTURL buildGsiftpTURL(Authority authority, PFN physicalFN) { + public static TTURL buildGsiftpTURL(Authority authority, PFN physicalFN) { - String extraSlashesForGSIFTP = Configuration.getInstance() - .getExtraSlashesForGsiFTPTURL(); - return buildTURL(Protocol.GSIFTP, authority, extraSlashesForGSIFTP, - physicalFN); - } + String extraSlashesForGSIFTP = StormConfiguration.getInstance().getExtraSlashesForGsiFTPTURL(); + return buildTURL(Protocol.GSIFTP, authority, extraSlashesForGSIFTP, physicalFN); + } - public static TTURL buildRFIOTURL(Authority authority, PFN physicalFN) { + public static TTURL buildRFIOTURL(Authority authority, PFN physicalFN) { - String extraSlashesForRFIO = Configuration.getInstance() - .getExtraSlashesForRFIOTURL(); - return buildTURL(Protocol.RFIO, authority, extraSlashesForRFIO, physicalFN); - } + String extraSlashesForRFIO = StormConfiguration.getInstance().getExtraSlashesForRFIOTURL(); + return buildTURL(Protocol.RFIO, authority, extraSlashesForRFIO, physicalFN); + } - public static TTURL buildROOTTURL(Authority authority, PFN physicalFN) { + public static TTURL buildROOTTURL(Authority authority, PFN physicalFN) { - String extraSlashesForROOT = Configuration.getInstance() - .getExtraSlashesForROOTTURL(); - return 
buildTURL(Protocol.ROOT, authority, extraSlashesForROOT, physicalFN); - } - - public static TTURL buildXROOTTURL(Authority authority, PFN physicalFN) { + String extraSlashesForROOT = StormConfiguration.getInstance().getExtraSlashesForROOTTURL(); + return buildTURL(Protocol.ROOT, authority, extraSlashesForROOT, physicalFN); + } - return buildROOTTURL(authority, physicalFN); - } -} \ No newline at end of file + public static TTURL buildXROOTTURL(Authority authority, PFN physicalFN) { + + return buildROOTTURL(authority, physicalFN); + } +} diff --git a/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java b/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java index ea879c1ad..ef2268c63 100644 --- a/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java +++ b/src/main/java/it/grid/storm/namespace/config/NamespaceCheck.java @@ -11,10 +11,10 @@ import java.util.Map; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.model.ACLEntry; import it.grid.storm.namespace.model.ApproachableRule; import it.grid.storm.namespace.model.Capability; @@ -25,158 +25,157 @@ public class NamespaceCheck { - private final Logger log = NamespaceDirector.getLogger(); - private final Map vfss; - private final Map maprules; - private final Map apprules; - - public NamespaceCheck(Map vfss, - Map maprules, - Map apprules) { - - this.vfss = vfss; - this.maprules = maprules; - this.apprules = apprules; - } - - public boolean check() { - - boolean vfsCheck = checkVFS(); - boolean mapRulesCheck = checkMapRules(); - boolean appRules = checkAppRules(); - checkGroups(vfsCheck); - return vfsCheck && mapRulesCheck && appRules; - } - - private boolean checkGroups(boolean vfsCheckResult) { - - log - .info("Namespace check. 
Checking of the existence of the needed Local group ..."); - boolean result = true; - if (!vfsCheckResult) { - log - .warn("Skip the check of the needed Local Group, because check of VFSs failed."); - } else { - - List vf = new ArrayList<>(vfss.values()); - for (VirtualFS vfs : vf) { - - // Check the presence of Default ACL - Capability cap = vfs.getCapabilities(); - if (cap != null) { - DefaultACL defACL = cap.getDefaultACL(); - if (defACL != null) { - List acl = new ArrayList<>(defACL.getACL()); - if (!acl.isEmpty()) { - for (ACLEntry aclEntry : acl) { - if (!LocalGroups.getInstance().isGroupDefined( - aclEntry.getGroupName())) { - log.warn("!!!!! Local Group for ACL ('{}') is not defined!", aclEntry); - result = false; - } - } - } - } - } - } - } - if (result) { - log.info("All local groups are defined. "); - } else { - log.warn("Please check the local group needed to StoRM"); - } - return result; - } - - /** - * Check if the root of the VFS exists. - * - * @todo: this method don't check if the root is accessible by storm user. 
- * - * @return true if "filesystems" element (list of VFS) is valid false - * otherwise - */ - private boolean checkVFS() { - - log.info("Namespace checking VFSs .."); - boolean result = true; - if (vfss == null) { - log.error("Anyone VFS is defined in namespace!"); - return false; - } else { - List rules = new ArrayList<>(vfss.values()); - Iterator scan = rules.iterator(); - - while (scan.hasNext()) { - VirtualFS vfs = scan.next(); - - String aliasName = vfs.getAliasName(); - log.debug("VFS named '{}' found.", aliasName); - String root = vfs.getRootPath(); - File file = new File(root); - boolean exists = file.exists(); - if (!exists) { - log.error("ERROR in NAMESPACE: The VFS '{}' does not have a valid root :'{}'", aliasName, root); - result = false; - } - } - } - if (result) { - log.info(" VFSs are well-defined."); - } - return result; - } - - private boolean checkMapRules() { - - boolean result = true; - if (maprules == null) { - return false; - } else { - int nrOfMappingRules = maprules.size(); - log.debug("Number of Mapping rules = {}", nrOfMappingRules); - List rules = new ArrayList<>(maprules.values()); - Iterator scan = rules.iterator(); - MappingRule rule; - String mappedVFS; - boolean check = false; - while (scan.hasNext()) { - rule = scan.next(); - mappedVFS = rule.getMappedFS().getAliasName(); - check = vfss.containsKey(mappedVFS); - if (!check) { - log.error("ERROR in NAMESPACE - MAP RULE '{}' point a UNKNOWN VFS '{}'!", rule.getRuleName(), mappedVFS); - result = false; - } - } - } - return result; - - } - - private boolean checkAppRules() { - - boolean result = true; - if (apprules == null) { - return false; - } else { - int nrOfApproachableRules = apprules.size(); - log.debug("Number of Approachable rules = {}", nrOfApproachableRules); - List rules = new ArrayList<>(apprules.values()); - Iterator scan = rules.iterator(); - boolean check = false; - while (scan.hasNext()) { - ApproachableRule rule = scan.next(); - List approachVFSs = 
Lists.newArrayList(rule.getApproachableVFS()); - for (VirtualFS aVfs : approachVFSs) { - check = vfss.containsKey(aVfs.getAliasName()); - if (!check) { - log.error("ERROR in NAMESPACE - APP RULE '{}' point a UNKNOWN VFS '{}'!", rule.getRuleName(), aVfs); - result = false; - } - } - } - } - return result; - } + private static Logger log = LoggerFactory.getLogger(NamespaceCheck.class); + + private final Map vfss; + private final Map maprules; + private final Map apprules; + + public NamespaceCheck(Map vfss, Map maprules, + Map apprules) { + + this.vfss = vfss; + this.maprules = maprules; + this.apprules = apprules; + } + + public boolean check() { + + boolean vfsCheck = checkVFS(); + boolean mapRulesCheck = checkMapRules(); + boolean appRules = checkAppRules(); + checkGroups(vfsCheck); + return vfsCheck && mapRulesCheck && appRules; + } + + private boolean checkGroups(boolean vfsCheckResult) { + + log.info("Namespace check. Checking of the existence of the needed Local group ..."); + boolean result = true; + if (!vfsCheckResult) { + log.warn("Skip the check of the needed Local Group, because check of VFSs failed."); + } else { + + List vf = new ArrayList<>(vfss.values()); + for (VirtualFS vfs : vf) { + + // Check the presence of Default ACL + Capability cap = vfs.getCapabilities(); + if (cap != null) { + DefaultACL defACL = cap.getDefaultACL(); + if (defACL != null) { + List acl = new ArrayList<>(defACL.getACL()); + if (!acl.isEmpty()) { + for (ACLEntry aclEntry : acl) { + if (!LocalGroups.getInstance().isGroupDefined(aclEntry.getGroupName())) { + log.warn("!!!!! Local Group for ACL ('{}') is not defined!", aclEntry); + result = false; + } + } + } + } + } + } + } + if (result) { + log.info("All local groups are defined. "); + } else { + log.warn("Please check the local group needed to StoRM"); + } + return result; + } + + /** + * Check if the root of the VFS exists. + * + * @todo: this method don't check if the root is accessible by storm user. 
+ * + * @return true if "filesystems" element (list of VFS) is valid false otherwise + */ + private boolean checkVFS() { + + log.info("Namespace checking VFSs .."); + boolean result = true; + if (vfss == null) { + log.error("Anyone VFS is defined in namespace!"); + return false; + } else { + List rules = new ArrayList<>(vfss.values()); + Iterator scan = rules.iterator(); + + while (scan.hasNext()) { + VirtualFS vfs = scan.next(); + + String aliasName = vfs.getAliasName(); + log.debug("VFS named '{}' found.", aliasName); + String root = vfs.getRootPath(); + File file = new File(root); + boolean exists = file.exists(); + if (!exists) { + log.error("ERROR in NAMESPACE: The VFS '{}' does not have a valid root :'{}'", aliasName, + root); + result = false; + } + } + } + if (result) { + log.info(" VFSs are well-defined."); + } + return result; + } + + private boolean checkMapRules() { + + boolean result = true; + if (maprules == null) { + return false; + } else { + int nrOfMappingRules = maprules.size(); + log.debug("Number of Mapping rules = {}", nrOfMappingRules); + List rules = new ArrayList<>(maprules.values()); + Iterator scan = rules.iterator(); + MappingRule rule; + String mappedVFS; + boolean check = false; + while (scan.hasNext()) { + rule = scan.next(); + mappedVFS = rule.getMappedFS().getAliasName(); + check = vfss.containsKey(mappedVFS); + if (!check) { + log.error("ERROR in NAMESPACE - MAP RULE '{}' point a UNKNOWN VFS '{}'!", + rule.getRuleName(), mappedVFS); + result = false; + } + } + } + return result; + + } + + private boolean checkAppRules() { + + boolean result = true; + if (apprules == null) { + return false; + } else { + int nrOfApproachableRules = apprules.size(); + log.debug("Number of Approachable rules = {}", nrOfApproachableRules); + List rules = new ArrayList<>(apprules.values()); + Iterator scan = rules.iterator(); + boolean check = false; + while (scan.hasNext()) { + ApproachableRule rule = scan.next(); + List approachVFSs = 
Lists.newArrayList(rule.getApproachableVFS()); + for (VirtualFS aVfs : approachVFSs) { + check = vfss.containsKey(aVfs.getAliasName()); + if (!check) { + log.error("ERROR in NAMESPACE - APP RULE '{}' point a UNKNOWN VFS '{}'!", + rule.getRuleName(), aVfs); + result = false; + } + } + } + } + return result; + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java index c25dd1341..c10f45739 100644 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceLoader.java @@ -4,17 +4,10 @@ */ package it.grid.storm.namespace.config.xml; -import it.grid.storm.namespace.NamespaceValidator; -import it.grid.storm.namespace.config.NamespaceLoader; - import static java.io.File.separatorChar; import java.io.File; import java.io.IOException; -import java.util.Observable; -import java.util.Observer; -import java.util.Timer; -import java.util.TimerTask; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; @@ -29,321 +22,194 @@ import org.w3c.dom.Element; import org.xml.sax.SAXException; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ -public class XMLNamespaceLoader extends Observable implements NamespaceLoader { - - private static Logger log = LoggerFactory.getLogger(XMLNamespaceLoader.class); - - public String filename; - public String path; - public int refresh; // refresh time in seconds before the configuration is - // checked for a change in parameters! - private XMLConfiguration config = null; - private final int delay = 1000; // delay for 5 sec. - private long period = -1; - private final Timer timer = new Timer(); - private XMLReloadingStrategy xmlStrategy; - private String namespaceFN = null; - private final String namespaceSchemaURL; - - public boolean schemaValidity = false; - - public XMLNamespaceLoader() { - - // Build the namespaceFileName - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(int refresh) { - - if (refresh < 0) { - this.refresh = 0; - } else { - this.refresh = refresh; - } - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String filename) { - - this.filename = filename; - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String path, String filename) { - - this.path = path; - this.filename = filename; - namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public XMLNamespaceLoader(String path, String filename, int refresh) { - - if (refresh < 0) { - this.refresh = 0; - } else { - this.refresh = refresh; - } - this.path = path; - this.filename = filename; - 
namespaceFN = getNamespaceFileName(); - namespaceSchemaURL = getNamespaceSchemaFileName(); - log.debug("Namespace XSD : {}", namespaceSchemaURL); - init(namespaceFN, refresh); - } - - public void setObserver(Observer obs) { - - addObserver(obs); - } - - public void setNotifyManaged() { - - xmlStrategy.notifingPerformed(); - config.setReloadingStrategy(xmlStrategy); - } - - /** - * The setChanged() protected method must overridden to make it public - */ - @Override - public synchronized void setChanged() { - - super.setChanged(); - } - - private void init(String namespaceFileName, int refresh) { - - log.info("Reading Namespace configuration file {} and setting refresh rate to {} seconds.", namespaceFileName, refresh); - - // create reloading strategy for refresh - xmlStrategy = new XMLReloadingStrategy(); - period = 3000; // Conversion in millisec. - log.debug(" Refresh time is {} millisec", period); - xmlStrategy.setRefreshDelay(period); // Set to refresh sec the refreshing delay. - - namespaceFN = namespaceFileName; - - // specify the properties file and set the reloading strategy for that file - try { - config = new XMLConfiguration(); - config.setFileName(namespaceFileName); - - // Validation of Namespace.xml - log.debug(" ... CHECK of VALIDITY of NAMESPACE Configuration ..."); - - schemaValidity = XMLNamespaceLoader.checkValidity(namespaceSchemaURL, - namespaceFileName); - if (!(schemaValidity)) { - log.error("NAMESPACE IS NOT VALID IN RESPECT OF NAMESPACE SCHEMA! "); - throw new ConfigurationException("XML is not valid!"); - } else { - log.debug("Namespace is valid in respect of NAMESPACE SCHEMA."); - } - - // This will throw a ConfigurationException if the XML document does not - // conform to its DTD. 
- - config.setReloadingStrategy(xmlStrategy); - - Peeper peeper = new Peeper(this); - timer.schedule(peeper, delay, period); - - log.debug("Timer initialized"); - - config.load(); - log.debug("Namespace Configuration read!"); - - } catch (ConfigurationException cex) { - log.error("ATTENTION! Unable to load Namespace Configuration!", cex); - log.error(toString()); - } - - } - - private String getNamespaceFileName() { - - String configurationDir = it.grid.storm.config.Configuration.getInstance() - .configurationDir(); - // Looking for namespace configuration file - String namespaceFN = it.grid.storm.config.Configuration.getInstance() - .getNamespaceConfigFilename(); - // Build the filename - if (configurationDir.charAt(configurationDir.length() - 1) != separatorChar) { - configurationDir += Character.toString(separatorChar); - } - String namespaceAbsFN = configurationDir + namespaceFN; - // Check the namespace conf file accessibility - File nsFile = new File(namespaceAbsFN); - if (nsFile.exists()) { - log.debug("Found the namespace file : {}", namespaceAbsFN); - } else { - log.error("Unable to find the namespace file : {}", namespaceAbsFN); - } - return namespaceAbsFN; - } - - private String getNamespaceSchemaFileName() { - - String schemaName = it.grid.storm.config.Configuration.getInstance() - .getNamespaceSchemaFilename(); - - if ("Schema UNKNOWN!".equals(schemaName)) { - - schemaName = "namespace.xsd"; - String namespaceFN = getNamespaceFileName(); - File namespaceFile = new File(namespaceFN); - if (namespaceFile.exists()) { - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - try { - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(namespaceFN); - Element rootElement = doc.getDocumentElement(); - String tagName = rootElement.getTagName(); - if ("namespace".equals(tagName)) { - if (rootElement.hasAttributes()) { - String value = rootElement - .getAttribute("xsi:noNamespaceSchemaLocation"); - if ((value 
!= null) && (value.length() > 0)) { - schemaName = value; - } - } else { - log.error("{} don't have a valid root element attributes", namespaceFN); - } - } else { - log.error("{} don't have a valid root element.", namespaceFN); - } - - } catch (ParserConfigurationException | SAXException | IOException e) { - log.error("Error while parsing {}: {}", namespaceFN, e.getMessage(), e); - } - } - } - - return schemaName; - - } - - public Configuration getConfiguration() { - - return config; - } - - private static boolean checkValidity(String namespaceSchemaURL, - String filename) { - - NamespaceValidator validator = new NamespaceValidator(); - return validator.validateSchema(namespaceSchemaURL, filename); - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - private class Peeper extends TimerTask { - - private XMLReloadingStrategy reloadingStrategy; - - private boolean signal; - private final XMLNamespaceLoader observed; - - public Peeper(XMLNamespaceLoader obs) { - - observed = obs; - } - - @Override - public void run() { - - // log.debug(" The glange of peeper.."); - reloadingStrategy = (XMLReloadingStrategy) config.getReloadingStrategy(); - boolean changed = reloadingStrategy.reloadingRequired(); - if (changed) { - log.debug(" NAMESPACE CONFIGURATION is changed ! "); - log.debug(" ... CHECK of VALIDITY of NAMESPACE Configuration ..."); - boolean valid = XMLNamespaceLoader.checkValidity(namespaceSchemaURL, - namespaceFN); - if (!valid) { - log - .debug(" Namespace configuration is not reloaded.. Please rectify the error."); - schemaValidity = false; - reloadingStrategy.notifingPerformed(); - reloadingStrategy.reloadingPerformed(); - } else { - log - .debug(" ... NAMESPACE Configuration is VALID in respect of Schema Grammar."); - log.debug(" ----> RELOADING "); - - schemaValidity = true; - - boolean forceReloading = it.grid.storm.config.Configuration - .getInstance().getNamespaceAutomaticReloading(); - if (forceReloading) { - config.reload(); - } else { - log - .debug(" ----> RELOAD of namespace don't be executed because NO AUTOMATIC RELOAD is configured."); - } - reloadingStrategy.reloadingPerformed(); - } - } - - signal = reloadingStrategy.notifingRequired(); - if ((signal)) { - observed.setChanged(); - observed.notifyObservers(" MSG : Namespace is changed!"); - reloadingStrategy.notifingPerformed(); - } - - } - - } +import it.grid.storm.namespace.NamespaceValidator; +import it.grid.storm.namespace.config.NamespaceLoader; +public class XMLNamespaceLoader implements NamespaceLoader { + + private static Logger log = LoggerFactory.getLogger(XMLNamespaceLoader.class); + + public String filename; + public String path; + public int refresh; // refresh time in seconds 
before the configuration is + // checked for a change in parameters! + private XMLConfiguration config = null; + private long period = -1; + private XMLReloadingStrategy xmlStrategy; + private String namespaceFN = null; + private final String namespaceSchemaURL; + + public boolean schemaValidity = false; + + public XMLNamespaceLoader() { + + // Build the namespaceFileName + namespaceFN = getNamespaceFileName(); + namespaceSchemaURL = getNamespaceSchemaFileName(); + init(namespaceFN, refresh); + } + + public XMLNamespaceLoader(int refresh) { + + if (refresh < 0) { + this.refresh = 0; + } else { + this.refresh = refresh; + } + namespaceFN = getNamespaceFileName(); + namespaceSchemaURL = getNamespaceSchemaFileName(); + log.debug("Namespace XSD : {}", namespaceSchemaURL); + init(namespaceFN, refresh); + } + + public XMLNamespaceLoader(String filename) { + + this.filename = filename; + namespaceFN = getNamespaceFileName(); + namespaceSchemaURL = getNamespaceSchemaFileName(); + log.debug("Namespace XSD : {}", namespaceSchemaURL); + init(namespaceFN, refresh); + } + + public XMLNamespaceLoader(String path, String filename) { + + this.path = path; + this.filename = filename; + namespaceFN = getNamespaceFileName(); + namespaceSchemaURL = getNamespaceSchemaFileName(); + log.debug("Namespace XSD : {}", namespaceSchemaURL); + init(namespaceFN, refresh); + } + + public XMLNamespaceLoader(String path, String filename, int refresh) { + + if (refresh < 0) { + this.refresh = 0; + } else { + this.refresh = refresh; + } + this.path = path; + this.filename = filename; + namespaceFN = getNamespaceFileName(); + namespaceSchemaURL = getNamespaceSchemaFileName(); + log.debug("Namespace XSD : {}", namespaceSchemaURL); + init(namespaceFN, refresh); + } + + public void setNotifyManaged() { + + xmlStrategy.notifingPerformed(); + config.setReloadingStrategy(xmlStrategy); + } + + private void init(String namespaceFileName, int refresh) { + + log.info("Reading Namespace configuration file {} and 
setting refresh rate to {} seconds.", + namespaceFileName, refresh); + + // create reloading strategy for refresh + xmlStrategy = new XMLReloadingStrategy(); + period = 3000; // Conversion in millisec. + log.debug(" Refresh time is {} millisec", period); + xmlStrategy.setRefreshDelay(period); // Set to refresh sec the refreshing delay. + + namespaceFN = namespaceFileName; + + // specify the properties file and set the reloading strategy for that file + try { + config = new XMLConfiguration(); + config.setFileName(namespaceFileName); + + // Validation of Namespace.xml + log.debug(" ... CHECK of VALIDITY of NAMESPACE Configuration ..."); + + schemaValidity = XMLNamespaceLoader.checkValidity(namespaceSchemaURL, namespaceFileName); + if (!(schemaValidity)) { + log.error("NAMESPACE IS NOT VALID IN RESPECT OF NAMESPACE SCHEMA! "); + throw new ConfigurationException("XML is not valid!"); + } else { + log.debug("Namespace is valid in respect of NAMESPACE SCHEMA."); + } + + config.load(); + log.debug("Namespace Configuration read!"); + + } catch (ConfigurationException cex) { + log.error("ATTENTION! 
Unable to load Namespace Configuration!", cex); + log.error(toString()); + } + + } + + private String getNamespaceFileName() { + + String configurationDir = it.grid.storm.config.StormConfiguration.getInstance().configurationDir(); + // Looking for namespace configuration file + String namespaceFN = + it.grid.storm.config.StormConfiguration.getInstance().getNamespaceConfigFilename(); + // Build the filename + if (configurationDir.charAt(configurationDir.length() - 1) != separatorChar) { + configurationDir += Character.toString(separatorChar); + } + String namespaceAbsFN = configurationDir + namespaceFN; + // Check the namespace conf file accessibility + File nsFile = new File(namespaceAbsFN); + if (nsFile.exists()) { + log.debug("Found the namespace file : {}", namespaceAbsFN); + } else { + log.error("Unable to find the namespace file : {}", namespaceAbsFN); + } + return namespaceAbsFN; + } + + private String getNamespaceSchemaFileName() { + + String schemaName = + it.grid.storm.config.StormConfiguration.getInstance().getNamespaceSchemaFilename(); + + if ("Schema UNKNOWN!".equals(schemaName)) { + + schemaName = "namespace.xsd"; + String namespaceFN = getNamespaceFileName(); + File namespaceFile = new File(namespaceFN); + if (namespaceFile.exists()) { + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + try { + DocumentBuilder builder = factory.newDocumentBuilder(); + Document doc = builder.parse(namespaceFN); + Element rootElement = doc.getDocumentElement(); + String tagName = rootElement.getTagName(); + if ("namespace".equals(tagName)) { + if (rootElement.hasAttributes()) { + String value = rootElement.getAttribute("xsi:noNamespaceSchemaLocation"); + if ((value != null) && (value.length() > 0)) { + schemaName = value; + } + } else { + log.error("{} don't have a valid root element attributes", namespaceFN); + } + } else { + log.error("{} don't have a valid root element.", namespaceFN); + } + + } catch (ParserConfigurationException | 
SAXException | IOException e) { + log.error("Error while parsing {}: {}", namespaceFN, e.getMessage(), e); + } + } + } + + return schemaName; + + } + + public Configuration getConfiguration() { + + return config; + } + + private static boolean checkValidity(String namespaceSchemaURL, String filename) { + + NamespaceValidator validator = new NamespaceValidator(); + return validator.validateSchema(namespaceSchemaURL, filename); + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java index 9ef02b31c..45b8a090c 100644 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLNamespaceParser.java @@ -11,21 +11,20 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Observable; -import java.util.Observer; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.configuration.ConfigurationException; import org.apache.commons.configuration.XMLConfiguration; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import it.grid.storm.balancer.BalancingStrategyType; import it.grid.storm.check.sanity.filesystem.SupportedFSType; import it.grid.storm.namespace.DefaultValuesInterface; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.PropertyInterface; import it.grid.storm.namespace.config.NamespaceCheck; @@ -59,801 +58,748 @@ import it.grid.storm.util.GPFSSizeHelper; -public class XMLNamespaceParser implements NamespaceParser, Observer { +public class XMLNamespaceParser implements NamespaceParser { - private final Logger log = NamespaceDirector.getLogger(); + private static Logger log = 
LoggerFactory.getLogger(XMLNamespaceParser.class); - private String version; - private Map vfss; - private Map maprules; - private Map apprules; + private String version; + private Map vfss; + private Map maprules; + private Map apprules; - private XMLParserUtil parserUtil; - private final XMLConfiguration configuration; - private XMLNamespaceLoader xmlLoader; + private XMLParserUtil parserUtil; + private final XMLConfiguration configuration; - private final Lock refreshing = new ReentrantLock(); + private final Lock refreshing = new ReentrantLock(); - /** - * Constructor - * - * @param loader - * NamespaceLoader - */ - public XMLNamespaceParser(NamespaceLoader loader) { + /** + * Constructor + * + * @param loader NamespaceLoader + */ + public XMLNamespaceParser(NamespaceLoader loader, boolean semanticCheckEnabled) { - configuration = (XMLConfiguration) loader.getConfiguration(); - if (loader instanceof XMLNamespaceLoader) { - xmlLoader = (XMLNamespaceLoader) loader; - xmlLoader.setObserver(this); - } else { - log.error("XMLParser initialized with a non-XML Loader"); - } + configuration = (XMLConfiguration) loader.getConfiguration(); - parserUtil = new XMLParserUtil(configuration); + parserUtil = new XMLParserUtil(configuration); - for (Iterator iter = parserUtil.getKeys(); iter.hasNext();) { - log.debug("current item: {}", iter.next()); - } - - vfss = new HashMap<>(); - maprules = new HashMap<>(); - apprules = new HashMap<>(); - - boolean validNamespaceConfiguration = refreshCachedData(); - if (!validNamespaceConfiguration) { - log.error(" ???????????????????????????????????? "); - log.error(" ???? NAMESPACE does not VALID ???? "); - log.error(" ???????????????????????????????????? "); - log.error(" Please see the log. 
"); - System.exit(0); - } - - } - - public Map getVFSs() { - - return vfss; - } - - public Map getApproachableRules() { - - return apprules; - } - - public Map getMappingRules() { - - return maprules; - } - - public long getLastUpdateTime() { - - return 0L; - } - - public void update(Observable observed, Object arg) { - - log.debug("{} Refreshing Namespace Memory Cache .. ", arg); - - XMLNamespaceLoader loader = (XMLNamespaceLoader) observed; - parserUtil = new XMLParserUtil(loader.getConfiguration()); - - if (loader.schemaValidity) { - refreshCachedData(); - } - - loader.setNotifyManaged(); - - log.debug(" ... Cache Refreshing ended"); - } - - /**************************************************************** - * PRIVATE METHODs - *****************************************************************/ - - private boolean refreshCachedData() { - - boolean result = false; - try { - refreshing.lock(); - configuration.clear(); - configuration.clearTree("filesystems"); - configuration.clearTree("mapping-rules"); - configuration.clearTree("approachable-rules"); - try { - configuration.load(); - log.debug(" ... 
reading and parsing the namespace configuration from file!"); - } catch (ConfigurationException ex) { - log.error(ex.getMessage(), ex); - } - log.debug("REFRESHING CACHE.."); - // Save the cache content - log.debug(" ..save the cache content before semantic check"); - Map vfssSAVED = vfss; - Map maprulesSAVED = maprules; - Map apprulesSAVED = apprules; - // Refresh the cache content with new values - - log.debug(" ..refresh the cache"); - refreshCache(); - - // Do the checking on Namespace - log.debug(" ..semantic check of namespace"); - NamespaceCheck checker = new NamespaceCheck(vfss, maprules, apprules); - boolean semanticCheck = checker.check(); - - // If there is an error restore old cache content - log.debug("REFRESHING ENDED."); - if (semanticCheck) { - log.debug("Namespace is semantically valid"); - result = true; - } else { - log - .warn("Namespace does not semantically valid!, so no load performed!"); - vfss = vfssSAVED; - maprules = maprulesSAVED; - apprules = apprulesSAVED; - result = false; - } - } finally { - refreshing.unlock(); - } - return result; - } - - private void refreshCache() { - - log - .info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : start ###############"); - - /************************** - * Retrieve Version Number - *************************/ - try { - retrieveVersion(); - } catch (NamespaceException ex1) { - log - .warn( - "Namespace configuration does not contain a valid version number.", - ex1); - /** - * @todo Manage this exceptional status! - */ - } - - /************************** - * Building VIRTUAL FS - *************************/ - try { - buildVFSs(); - } catch (ClassNotFoundException ex) { - log - .error("Namespace Configuration ERROR in VFS-DRIVER specification", ex); - /** - * @todo Manage this exceptional status! - */ - } catch (NamespaceException ex) { - log - .error( - "Namespace Configuration ERROR in VFS definition, please check it.", - ex); - /** - * @todo Manage this exceptional status! 
- */ - } - - /************************** - * Building MAPPING RULES - *************************/ - try { - buildMapRules(); - } catch (NamespaceException ex1) { - log - .error( - "Namespace Configuration ERROR in MAPPING RULES definition, please check it.", - ex1); - /** - * @todo Manage this exceptional status! - */ - } - - /************************** - * Building APPROACHABLE RULES - *************************/ - try { - buildAppRules(); - } catch (NamespaceException ex2) { - log - .error( - "Namespace Configuration ERROR in APPROACHABLE RULES definition, please check it.", - ex2); - /** - * @todo Manage this exceptional status! - */ - } - log - .info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : end ###############"); - - handleTotalOnlineSizeFromGPFSQuota(); - // Update SA within Reserved Space Catalog - updateSA(); - } - - private void handleTotalOnlineSizeFromGPFSQuota() { - - for (Entry entry : vfss.entrySet()) { - String storageAreaName = entry.getKey(); - VirtualFS storageArea = entry.getValue(); - if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) { - Quota quota = storageArea.getCapabilities().getQuota(); - if (quota != null && quota.getEnabled()) { - - GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea); - if (quotaInfo != null) { - updateTotalOnlineSizeFromGPFSQuota(storageAreaName, storageArea, - quotaInfo); - } - } - } - } - } - - private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFS storageArea) { - - GetGPFSFilesetQuotaInfoCommand cmd = new GetGPFSFilesetQuotaInfoCommand(storageArea); - - try { - return cmd.call(); - } catch (Throwable t) { - log - .warn( - "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " - + "for Storage Area {}. 
Reason: {}", storageArea.getAliasName(), - t.getMessage()); - return null; - } - } - - private void updateTotalOnlineSizeFromGPFSQuota(String storageAreaName, - VirtualFS storageArea, GPFSFilesetQuotaInfo quotaInfo) { - - long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo - .getBlockSoftLimit()); - Property newProperties = Property.from(storageArea.getProperties()); - try { - newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), - gpfsTotalOnlineSize); - storageArea.setProperties(newProperties); - log.warn("TotalOnlineSize as specified in namespace.xml will be ignored " - + "since quota is enabled on the GPFS {} Storage Area.", - storageAreaName); - } catch (NamespaceException e) { - log - .warn( - "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " - + "for Storage Area {}.", storageAreaName, e); - } - } - - // ******************* Update SA Catalog *************************** - private void updateSA() { - - TSpaceToken spaceToken = null; - // ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - SpaceHelper spaceHelp = new SpaceHelper(); - log - .debug("Updating Space Catalog with Storage Area defined within NAMESPACE"); - VirtualFS vfs = null; - Iterator scan = vfss.values().iterator(); - while (scan.hasNext()) { - - vfs = (VirtualFS) scan.next(); - String vfsAliasName = vfs.getAliasName(); - log.debug(" Considering VFS : {}", vfsAliasName); - String aliasName = vfs.getSpaceTokenDescription(); - if (aliasName == null) { - // Found a VFS without the optional element Space Token Description - log.debug("XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. 
" - + "Skipping the Update of SA", vfsAliasName); - } else { - TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize(); - String spaceFileName = vfs.getRootPath(); - spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, - spaceFileName); - vfs.setSpaceToken(spaceToken); - - log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", - aliasName, spaceToken, onlineSize, spaceFileName); - } - - } - spaceHelp.purgeOldVOSA_token(); - log.debug("Updating Space Catalog... DONE!!"); - - } - - // ******************* VERSION NUMBER *************************** - private void retrieveVersion() throws NamespaceException { - - version = parserUtil.getNamespaceVersion(); - log.debug(" ==== NAMESPACE VERSION : '{}' ====", version); - } - - // ******************* VIRTUAL FS *************************** - - private void buildVFSs() throws ClassNotFoundException, NamespaceException { - - int nrOfVFS = parserUtil.getNumberOfFS(); - // For each VFS within configuration build VFS class instance -// VirtualFS vfs; -// String spaceTokenDescription = null; -// StorageClassType storageClass; -// String root = null; -// String name; -// String fsType; -// Class driver; -// String storageAreaAuthz; -// PropertyInterface prop; -// CapabilityInterface cap; -// DefaultValuesInterface defValues; -// SAAuthzType saAuthzType; - - for (int i = 0; i < nrOfVFS; i++) { - // Building VFS - VirtualFS vfs = new VirtualFS(); - // name - String name = parserUtil.getFSName(i); - vfs.setAliasName(name); - log.debug("VFS({}).name = '{}'", i, name); - // fs type - String fsType = parserUtil.getFSType(name); - vfs.setFSType(fsType); - log.debug("VFS({}).fs_type = '{}'", name, fsType); - // space token - String spaceTokenDescription = parserUtil.getFSSpaceTokenDescription(name); - vfs.setSpaceTokenDescription(spaceTokenDescription); - log.debug("VFS({}).space-token-description = '{}'", name, spaceTokenDescription); - // storage class - StorageClassType storageClass = 
StorageClassType.getStorageClassType(parserUtil.getStorageClass(name)); - vfs.setStorageClassType(storageClass); - log.debug("VFS({}).storage-class = '{}'", name, storageClass); - // root path - String root = parserUtil.getFSRoot(name); - vfs.setRoot(root); - log.debug("VFS({}).root = '{}'", name, root); - // fs driver - Class fsDriver = Class.forName(parserUtil.getFSDriver(name)); - vfs.setFSDriver(fsDriver); - log.debug("VFS({}).fsDriver [CLASS Name] = '{}'", name, fsDriver.getName()); - // space driver - Class spaceDriver = Class.forName(parserUtil.getSpaceDriver(name)); - vfs.setSpaceSystemDriver(spaceDriver); - log.debug("VFS({}).spaceDriver [CLASS Name] = '{}'", name, spaceDriver.getName()); - // authz type - SAAuthzType saAuthzType = parserUtil.getStorageAreaAuthzType(name); - vfs.setSAAuthzType(saAuthzType); - log.debug("VFS({}).storage-area-authz.TYPE = '{}'", name, saAuthzType); - // storage area authz - String storageAreaAuthz = parserUtil.getStorageAreaAuthz(name, saAuthzType); - vfs.setSAAuthzSource(storageAreaAuthz); - log.debug("VFS({}).storage-area-authz = '{}'", name, storageAreaAuthz); - // properties - PropertyInterface prop = buildProperties(name); - vfs.setProperties(prop); - // capabilities - Capability cap = buildCapabilities(name); - vfs.setCapabilities(cap); - - DefaultValuesInterface defValues = buildDefaultValues(name); - vfs.setDefaultValues(defValues); - - // Adding VFS - synchronized (this) { - vfss.remove(name); - vfss.put(name, vfs); - } - } - } - - // ******************* PROPERTY *************************** - private PropertyInterface buildProperties(String fsName) - throws NamespaceException { - - Property prop = new Property(); - - String accessLatency = parserUtil.getAccessLatencyType(fsName); - prop.setAccessLatency(accessLatency); - log.debug("VFS({}).Properties.AccessLatency = '{}'", fsName, accessLatency); - - String expirationMode = parserUtil.getExpirationModeType(fsName); - prop.setExpirationMode(expirationMode); - 
log.debug("VFS({}).Properties.ExpirationMode = '{}'", fsName, expirationMode); - - String retentionPolicy = parserUtil.getRetentionPolicyType(fsName); - prop.setRetentionPolicy(retentionPolicy); - log.debug("VFS({}).Properties.RetentionPolicy = '{}'", fsName, retentionPolicy); - - String unitType = parserUtil.getNearlineSpaceUnitType(fsName); - long nearLineSize = parserUtil.getNearlineSpaceSize(fsName); - prop.setTotalNearlineSize(unitType, nearLineSize); - log.debug("VFS({}).Properties.NearlineSpaceSize = '{} {}'", fsName, nearLineSize, unitType); - - unitType = parserUtil.getOnlineSpaceUnitType(fsName); - long onlineSize = parserUtil.getOnlineSpaceSize(fsName); - prop.setTotalOnlineSize(unitType, onlineSize); - log.debug("VFS({}).Properties.OnlineSpaceSize = '{} {}'", fsName, onlineSize, unitType); - - boolean hasLimitedSize = parserUtil.getOnlineSpaceLimitedSize(fsName); - prop.setLimitedSize(hasLimitedSize); - log.debug("VFS({}).Properties.OnlineSpaceLimitedSize = '{}'", fsName, hasLimitedSize); - - return prop; - } - - // ******************* CAPABILITY *************************** - - private Capability buildCapabilities(String fsName) - throws NamespaceException { - - /** - * ACL MODE ELEMENT - */ - ACLMode aclMode = ACLMode.makeFromString(parserUtil.getACLMode(fsName)); - Capability cap = new Capability(aclMode); - log.debug("VFS({}).Capabilities.aclMode = '{}'", fsName, aclMode); - - /** - * DEFAULT ACL - */ - boolean defaultACLDefined = parserUtil.getDefaultACLDefined(fsName); - log.debug("VFS({}).Capabilities.defaultACL [Defined?] 
= {}", fsName, defaultACLDefined); - if (defaultACLDefined) { - int nrACLEntries = parserUtil.getNumberOfACL(fsName); - String groupName = null; - String filePermString = null; - ACLEntry aclEntry = null; - for (int entryNumber = 0; entryNumber < nrACLEntries; entryNumber++) { - groupName = parserUtil.getGroupName(fsName, entryNumber); - filePermString = parserUtil.getPermissionString(fsName, entryNumber); - try { - aclEntry = new ACLEntry(groupName, filePermString); - cap.addACLEntry(aclEntry); - } catch (PermissionException permEx) { - log.error("Namespace XML Parser -- ERROR -- : {}", permEx.getMessage()); - } - } - log.debug("VFS({}).Capabilities.defaultACL = {}", fsName, cap.getDefaultACL()); - } - - /** - * QUOTA ELEMENT - */ - boolean quotaDefined = parserUtil.getQuotaDefined(fsName); - Quota quota = null; - if (quotaDefined) { - boolean quotaEnabled = parserUtil.getQuotaEnabled(fsName); - String device = parserUtil.getQuotaDevice(fsName); - - QuotaType quotaType; - String quotaValue = null; - - if (parserUtil.getQuotaFilesetDefined(fsName)) { - quotaType = QuotaType.buildQuotaType(QuotaType.FILESET); - quotaValue = parserUtil.getQuotaFileset(fsName); - } else { - if (parserUtil.getQuotaGroupIDDefined(fsName)) { - quotaType = QuotaType.buildQuotaType(QuotaType.GRP); - quotaValue = parserUtil.getQuotaGroupID(fsName); - } else { - if (parserUtil.getQuotaUserIDDefined(fsName)) { - quotaType = QuotaType.buildQuotaType(QuotaType.USR); - quotaValue = parserUtil.getQuotaUserID(fsName); - } else { - quotaType = QuotaType.buildQuotaType(QuotaType.UNKNOWN); - quotaValue = "unknown"; - } - } - } - - quotaType.setValue(quotaValue); - quota = new Quota(quotaEnabled, device, quotaType); - - } else { - quota = new Quota(); - } - cap.setQuota(quota); - - log.debug("VFS({}).Capabilities.quota = '{}'", fsName, quota); - - /** - * TRANSFER PROTOCOL - */ - int nrProtocols = parserUtil.getNumberOfProt(fsName); - for (int protCounter = 0; protCounter < nrProtocols; protCounter++) 
{ - int protocolIndex = parserUtil.getProtId(fsName, protCounter); - String name = parserUtil.getProtName(fsName, protCounter); - String schema = parserUtil.getProtSchema(fsName, protCounter); - Protocol protocol = Protocol.getProtocol(schema); - protocol.setProtocolServiceName(name); - String serviceHostName = parserUtil.getProtHost(fsName, protCounter); - String servicePortValue = parserUtil.getProtPort(fsName, protCounter); - int portIntValue = -1; - Authority service = null; - if (servicePortValue != null) { - try { - portIntValue = Integer.parseInt(servicePortValue); - service = new Authority(serviceHostName, portIntValue); - } catch (NumberFormatException nfe) { - log - .warn("to evaluate the environmental variable " + servicePortValue); - } - } else { - service = new Authority(serviceHostName); - } - TransportProtocol transportProt = new TransportProtocol(protocol, service); - transportProt.setProtocolID(protocolIndex); - log.debug("VFS({}).Capabilities.protocol({}) = '{}'", fsName, protCounter, transportProt); - cap.addTransportProtocolByScheme(protocol, transportProt); - cap.addTransportProtocol(transportProt); - if (protocolIndex != -1) { - cap.addTransportProtocolByID(protocolIndex, transportProt); - } - - } - - /** - * PROTOCOL POOL - */ - int nrPools = parserUtil.getNumberOfPool(fsName); - if (nrPools > 0) { - - for (int poolCounter = 0; poolCounter < nrPools; poolCounter++) { - BalancingStrategyType balanceStrategy = BalancingStrategyType - .getByValue(parserUtil.getBalancerStrategy(fsName, poolCounter)); - List poolMembers = Lists.newArrayList(); - int nrMembers = parserUtil.getNumberOfPoolMembers(fsName, poolCounter); - for (int i = 0; i < nrMembers; i++) { - int protIndex = parserUtil.getMemberID(fsName, poolCounter, i); - TransportProtocol tProtMember = cap.getProtocolByID(protIndex); - if (tProtMember != null) { - PoolMember poolMember; - if (balanceStrategy.requireWeight()) { - int memberWeight = parserUtil.getMemberWeight(fsName, - poolCounter, 
i); - poolMember = new PoolMember(protIndex, tProtMember, memberWeight); - } else { - poolMember = new PoolMember(protIndex, tProtMember); - } - poolMembers.add(poolMember); - } else { // member pointed out doesn't exist!! - String errorMessage = String.format("POOL Building: Protocol with index %d does not exists in the VFS : %s", protIndex, fsName); - log.error(errorMessage); - throw new NamespaceException(errorMessage); - } - } - verifyPoolIsValid(poolMembers); - Protocol poolProtocol = poolMembers.get(0).getMemberProtocol().getProtocol(); - log.debug("Defined pool for protocol {} with size {}", poolProtocol, - poolMembers.size()); - ProtocolPool pool = new ProtocolPool(balanceStrategy, poolMembers); - cap.addProtocolPool(pool); - cap.addProtocolPoolBySchema(poolProtocol, pool); - log.debug("PROTOCOL POOL: {}", cap.getPoolByScheme(poolProtocol)); - } - } else { - log.debug("Pool is not defined in VFS {}", fsName); - } - - return cap; - } - - /** - * @param poolMembers - * @throws NamespaceException - */ - private void verifyPoolIsValid(List poolMembers) - throws NamespaceException { - - if (poolMembers.isEmpty()) { - throw new NamespaceException("POOL Defined is EMPTY!"); - } - Protocol prot = poolMembers.get(0).getMemberProtocol().getProtocol(); - for (PoolMember member : poolMembers) { - if (!(member.getMemberProtocol().getProtocol().equals(prot))) { - throw new NamespaceException( - "Defined Pool is NOT HOMOGENEOUS! Protocols " + prot.toString() - + " and " + member.toString() + " differs"); - } - } - } - - // ******************* DEFAULT VALUES *************************** - - private DefaultValuesInterface buildDefaultValues(String fsName) - throws NamespaceException { - - DefaultValues def = new DefaultValues(); - if (parserUtil.isDefaultElementPresent(fsName)) { - setSpaceDef(fsName, def); - setFileDef(fsName, def); - } else { // Produce Default Values with default values :o ! - log.debug("VFS({}).DefaultValues is ABSENT. 
Using DEFAULT values.", fsName); - } - return def; - } - - private void setSpaceDef(String fsName, DefaultValues def) - throws NamespaceException { - - String spaceType = parserUtil.getDefaultSpaceType(fsName); - log.debug("VFS({}).DefaultValues.space.type = '{}'", fsName, spaceType); - long lifeTime = parserUtil.getDefaultSpaceLifeTime(fsName); - log.debug("VFS({}).DefaultValues.space.lifeTime = ''", fsName, lifeTime); - long guarSize = parserUtil.getDefaultSpaceGuarSize(fsName); - log.debug("VFS({}).DefaultValues.space.guarSize = '{}'", fsName, guarSize); - long totSize = parserUtil.getDefaultSpaceTotSize(fsName); - log.debug("VFS({}).DefaultValues.space.totSize = '{}'", fsName, totSize); - def.setSpaceDefaults(spaceType, lifeTime, guarSize, totSize); - } - - private void setFileDef(String fsName, DefaultValues def) - throws NamespaceException { - - String fileType = parserUtil.getDefaultFileType(fsName); - log.debug("VFS({}).DefaultValues.file.type = '{}'", fsName, fileType); - long lifeTime = parserUtil.getDefaultFileLifeTime(fsName); - log.debug("VFS({}).DefaultValues.file.lifeTime = '{}'", fsName, lifeTime); - def.setFileDefaults(fileType, lifeTime); - } - - // ******************* MAPPING RULE *************************** - - private void buildMapRules() throws NamespaceException { - - int numOfMapRules = parserUtil.getNumberOfMappingRule(); - String ruleName; - String stfnRoot; - String mappedFS; - MappingRule mapRule; - - for (int i = 0; i < numOfMapRules; i++) { - ruleName = parserUtil.getMapRuleName(i); - mappedFS = parserUtil.getMapRule_mappedFS(ruleName); - // Adding mapping rule to VFS within vfss; - if (vfss.containsKey(mappedFS)) { - log.debug("VFS '{}' pointed by RULE : '{}' exists.", mappedFS, ruleName); - stfnRoot = parserUtil.getMapRule_StFNRoot(ruleName); - VirtualFS vfs = vfss.get(mappedFS); - mapRule = new MappingRule(ruleName, stfnRoot, vfs); - ((VirtualFS) vfs).addMappingRule(mapRule); - maprules.put(ruleName, mapRule); - } else { - 
log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", mappedFS, ruleName); - } - } - } - - // ******************* APPROACHABLE RULE *************************** - - private void buildAppRules() throws NamespaceException { - - int numOfAppRules = parserUtil.getNumberOfApproachRule(); - - String ruleName; - String dn; - String vo_name; - String relPath; - String anonymousHttpReadString; - List appFSList; - ApproachableRule appRule; - - log.debug("Number of APP Rule : {}", numOfAppRules); - - - for (int i = 0; i < numOfAppRules; i++) { - ruleName = parserUtil.getApproachRuleName(i); - log.debug(" APP rule nr: {} is named : {}", i, ruleName); - - dn = parserUtil.getAppRule_SubjectDN(ruleName); - vo_name = parserUtil.getAppRule_SubjectVO(ruleName); - SubjectRules subjectRules = new SubjectRules(dn, vo_name); - - relPath = parserUtil.getAppRule_RelativePath(ruleName); - - anonymousHttpReadString = parserUtil - .getAppRule_AnonymousHttpRead(ruleName); - if (anonymousHttpReadString != null - && !anonymousHttpReadString.trim().isEmpty()) { - appRule = new ApproachableRule(ruleName, subjectRules, relPath, - Boolean.parseBoolean(anonymousHttpReadString)); - } else { - appRule = new ApproachableRule(ruleName, subjectRules, relPath); - } - - appFSList = parserUtil.getAppRule_AppFS(ruleName); - for (String appFS : appFSList) { - if (vfss.containsKey(appFS)) { - log.debug("VFS '{}' pointed by RULE : '{}' exists.", appFS, ruleName); - VirtualFS vfs = vfss.get(appFS); - ((VirtualFS) vfs).addApproachableRule(appRule); - appRule.addApproachableVFS(vfs); - } else { - log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", appFS, ruleName); - } - } - apprules.put(ruleName, appRule); - } - } - - /***************************************************************************** - * BUSINESS METHODs - ****************************************************************************/ - - public String getNamespaceVersion() { - - return version; - } - - public List getAllVFS_Roots() { - 
- Collection elem = vfss.values(); - List roots = new ArrayList<>(vfss.size()); - Iterator scan = elem.iterator(); - while (scan.hasNext()) { - String root = null; - root = scan.next().getRootPath(); - roots.add(root); - } - return roots; - } - - public Map getMapVFS_Root() { - - Map result = new HashMap<>(); - Collection elem = vfss.values(); - Iterator scan = elem.iterator(); - while (scan.hasNext()) { - String root = null; - VirtualFS vfs = scan.next(); - root = vfs.getRootPath(); - result.put(root, vfs); - } - return result; - } - - public List getAllMappingRule_StFNRoots() { - - Collection elem = maprules.values(); - List roots = new ArrayList<>(maprules.size()); - Iterator scan = elem.iterator(); - String root = null; - while (scan.hasNext()) { - root = scan.next().getStFNRoot(); - roots.add(root); - } - return roots; - } - - public Map getMappingRuleMAP() { - - Map map = new HashMap<>(); - Collection elem = maprules.values(); - Iterator scan = elem.iterator(); - String root = null; - String name = null; - MappingRule rule; - while (scan.hasNext()) { - rule = scan.next(); - root = rule.getStFNRoot(); - name = rule.getRuleName(); - map.put(name, root); - } - return map; - } - - public VirtualFS getVFS(String vfsName) { - - return vfss.get(vfsName); - } + for (Iterator iter = parserUtil.getKeys(); iter.hasNext();) { + log.debug("current item: {}", iter.next()); + } + + vfss = Maps.newHashMap(); + maprules = Maps.newHashMap(); + apprules = Maps.newHashMap(); + + boolean validNamespaceConfiguration = refreshCachedData(semanticCheckEnabled); + if (!validNamespaceConfiguration) { + log.error("Invalid NAMESPACE! 
Please see the log."); + System.exit(1); + } + + } + + public Map getVFSs() { + + return vfss; + } + + public Map getApproachableRules() { + + return apprules; + } + + public Map getMappingRules() { + + return maprules; + } + + public long getLastUpdateTime() { + + return 0L; + } + + private boolean refreshCachedData(boolean semanticCheckEnabled) { + + boolean result = false; + try { + refreshing.lock(); + configuration.clear(); + configuration.clearTree("filesystems"); + configuration.clearTree("mapping-rules"); + configuration.clearTree("approachable-rules"); + try { + configuration.load(); + log.debug(" ... reading and parsing the namespace configuration from file!"); + } catch (ConfigurationException ex) { + log.error(ex.getMessage(), ex); + } + log.debug("REFRESHING CACHE.."); + // Save the cache content + log.debug(" ..save the cache content before semantic check"); + Map vfssSAVED = vfss; + Map maprulesSAVED = maprules; + Map apprulesSAVED = apprules; + // Refresh the cache content with new values + + log.debug(" ..refresh the cache"); + refreshCache(); + + if (semanticCheckEnabled) { + + // Do the checking on Namespace + log.debug(" ..semantic check of namespace"); + NamespaceCheck checker = new NamespaceCheck(vfss, maprules, apprules); + boolean semanticCheck = checker.check(); + + // If there is an error restore old cache content + if (semanticCheck) { + log.debug("Namespace is semantically valid"); + result = true; + } else { + log.warn("Namespace does not semantically valid!, so no load performed!"); + vfss = vfssSAVED; + maprules = maprulesSAVED; + apprules = apprulesSAVED; + result = false; + } + } else { + result = true; + } + + log.debug("REFRESHING ENDED."); + + } finally { + refreshing.unlock(); + } + return result; + } + + private void refreshCache() { + + log.info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : start ###############"); + + /************************** + * Retrieve Version Number + *************************/ + try { + 
retrieveVersion(); + } catch (NamespaceException ex1) { + log.warn("Namespace configuration does not contain a valid version number.", ex1); + /** + * @todo Manage this exceptional status! + */ + } + + /************************** + * Building VIRTUAL FS + *************************/ + try { + buildVFSs(); + } catch (ClassNotFoundException ex) { + log.error("Namespace Configuration ERROR in VFS-DRIVER specification", ex); + /** + * @todo Manage this exceptional status! + */ + } catch (NamespaceException ex) { + log.error("Namespace Configuration ERROR in VFS definition, please check it.", ex); + /** + * @todo Manage this exceptional status! + */ + } + + /************************** + * Building MAPPING RULES + *************************/ + try { + buildMapRules(); + } catch (NamespaceException ex1) { + log.error("Namespace Configuration ERROR in MAPPING RULES definition, please check it.", ex1); + /** + * @todo Manage this exceptional status! + */ + } + + /************************** + * Building APPROACHABLE RULES + *************************/ + try { + buildAppRules(); + } catch (NamespaceException ex2) { + log.error("Namespace Configuration ERROR in APPROACHABLE RULES definition, please check it.", + ex2); + /** + * @todo Manage this exceptional status! 
+ */ + } + log.info(" ############## REFRESHING NAMESPACE CONFIGURATION CACHE : end ###############"); + + handleTotalOnlineSizeFromGPFSQuota(); + // Update SA within Reserved Space Catalog + updateSA(); + } + + private void handleTotalOnlineSizeFromGPFSQuota() { + + for (Entry entry : vfss.entrySet()) { + String storageAreaName = entry.getKey(); + VirtualFS storageArea = entry.getValue(); + if (SupportedFSType.parseFS(storageArea.getFSType()) == SupportedFSType.GPFS) { + Quota quota = storageArea.getCapabilities().getQuota(); + if (quota != null && quota.getEnabled()) { + + GPFSFilesetQuotaInfo quotaInfo = getGPFSQuotaInfo(storageArea); + if (quotaInfo != null) { + updateTotalOnlineSizeFromGPFSQuota(storageAreaName, storageArea, quotaInfo); + } + } + } + } + } + + private GPFSFilesetQuotaInfo getGPFSQuotaInfo(VirtualFS storageArea) { + + GetGPFSFilesetQuotaInfoCommand cmd = new GetGPFSFilesetQuotaInfoCommand(storageArea); + + try { + return cmd.call(); + } catch (Throwable t) { + log.warn( + "Cannot get quota information out of GPFS. Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}. Reason: {}", + storageArea.getAliasName(), t.getMessage()); + return null; + } + } + + private void updateTotalOnlineSizeFromGPFSQuota(String storageAreaName, VirtualFS storageArea, + GPFSFilesetQuotaInfo quotaInfo) { + + long gpfsTotalOnlineSize = GPFSSizeHelper.getBytesFromKIB(quotaInfo.getBlockSoftLimit()); + Property newProperties = Property.from(storageArea.getProperties()); + try { + newProperties.setTotalOnlineSize(SizeUnitType.BYTE.getTypeName(), gpfsTotalOnlineSize); + storageArea.setProperties(newProperties); + log.warn("TotalOnlineSize as specified in namespace.xml will be ignored " + + "since quota is enabled on the GPFS {} Storage Area.", storageAreaName); + } catch (NamespaceException e) { + log.warn( + "Cannot get quota information out of GPFS. 
Using the TotalOnlineSize in namespace.xml " + + "for Storage Area {}.", + storageAreaName, e); + } + } + + // ******************* Update SA Catalog *************************** + private void updateSA() { + + TSpaceToken spaceToken = null; + // ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); + SpaceHelper spaceHelp = new SpaceHelper(); + log.debug("Updating Space Catalog with Storage Area defined within NAMESPACE"); + VirtualFS vfs = null; + Iterator scan = vfss.values().iterator(); + while (scan.hasNext()) { + + vfs = (VirtualFS) scan.next(); + String vfsAliasName = vfs.getAliasName(); + log.debug(" Considering VFS : {}", vfsAliasName); + String aliasName = vfs.getSpaceTokenDescription(); + if (aliasName == null) { + // Found a VFS without the optional element Space Token Description + log.debug( + "XMLNamespaceParser.UpdateSA() : Found a VFS ('{}') without space-token-description. " + + "Skipping the Update of SA", + vfsAliasName); + } else { + TSizeInBytes onlineSize = vfs.getProperties().getTotalOnlineSize(); + String spaceFileName = vfs.getRootPath(); + spaceToken = spaceHelp.createVOSA_Token(aliasName, onlineSize, spaceFileName); + vfs.setSpaceToken(spaceToken); + + log.debug(" Updating SA ('{}'), token:'{}', onlineSize:'{}', spaceFileName:'{}'", aliasName, + spaceToken, onlineSize, spaceFileName); + } + + } + spaceHelp.purgeOldVOSA_token(); + log.debug("Updating Space Catalog... 
DONE!!"); + + } + + // ******************* VERSION NUMBER *************************** + private void retrieveVersion() throws NamespaceException { + + version = parserUtil.getNamespaceVersion(); + log.debug(" ==== NAMESPACE VERSION : '{}' ====", version); + } + + // ******************* VIRTUAL FS *************************** + + private void buildVFSs() throws ClassNotFoundException, NamespaceException { + + int nrOfVFS = parserUtil.getNumberOfFS(); + // For each VFS within configuration build VFS class instance + // VirtualFS vfs; + // String spaceTokenDescription = null; + // StorageClassType storageClass; + // String root = null; + // String name; + // String fsType; + // Class driver; + // String storageAreaAuthz; + // PropertyInterface prop; + // CapabilityInterface cap; + // DefaultValuesInterface defValues; + // SAAuthzType saAuthzType; + + for (int i = 0; i < nrOfVFS; i++) { + // Building VFS + VirtualFS vfs = new VirtualFS(); + // name + String name = parserUtil.getFSName(i); + vfs.setAliasName(name); + log.debug("VFS({}).name = '{}'", i, name); + // fs type + String fsType = parserUtil.getFSType(name); + vfs.setFSType(fsType); + log.debug("VFS({}).fs_type = '{}'", name, fsType); + // space token + String spaceTokenDescription = parserUtil.getFSSpaceTokenDescription(name); + vfs.setSpaceTokenDescription(spaceTokenDescription); + log.debug("VFS({}).space-token-description = '{}'", name, spaceTokenDescription); + // storage class + StorageClassType storageClass = + StorageClassType.getStorageClassType(parserUtil.getStorageClass(name)); + vfs.setStorageClassType(storageClass); + log.debug("VFS({}).storage-class = '{}'", name, storageClass); + // root path + String root = parserUtil.getFSRoot(name); + vfs.setRoot(root); + log.debug("VFS({}).root = '{}'", name, root); + // fs driver + Class fsDriver = Class.forName(parserUtil.getFSDriver(name)); + vfs.setFSDriver(fsDriver); + log.debug("VFS({}).fsDriver [CLASS Name] = '{}'", name, fsDriver.getName()); + // space 
driver + Class spaceDriver = Class.forName(parserUtil.getSpaceDriver(name)); + vfs.setSpaceSystemDriver(spaceDriver); + log.debug("VFS({}).spaceDriver [CLASS Name] = '{}'", name, spaceDriver.getName()); + // authz type + SAAuthzType saAuthzType = parserUtil.getStorageAreaAuthzType(name); + vfs.setSAAuthzType(saAuthzType); + log.debug("VFS({}).storage-area-authz.TYPE = '{}'", name, saAuthzType); + // storage area authz + String storageAreaAuthz = parserUtil.getStorageAreaAuthz(name, saAuthzType); + vfs.setSAAuthzSource(storageAreaAuthz); + log.debug("VFS({}).storage-area-authz = '{}'", name, storageAreaAuthz); + // properties + PropertyInterface prop = buildProperties(name); + vfs.setProperties(prop); + // capabilities + Capability cap = buildCapabilities(name); + vfs.setCapabilities(cap); + + DefaultValuesInterface defValues = buildDefaultValues(name); + vfs.setDefaultValues(defValues); + + // Adding VFS + synchronized (this) { + vfss.remove(name); + vfss.put(name, vfs); + } + } + } + + // ******************* PROPERTY *************************** + private PropertyInterface buildProperties(String fsName) throws NamespaceException { + + Property prop = new Property(); + + String accessLatency = parserUtil.getAccessLatencyType(fsName); + prop.setAccessLatency(accessLatency); + log.debug("VFS({}).Properties.AccessLatency = '{}'", fsName, accessLatency); + + String expirationMode = parserUtil.getExpirationModeType(fsName); + prop.setExpirationMode(expirationMode); + log.debug("VFS({}).Properties.ExpirationMode = '{}'", fsName, expirationMode); + + String retentionPolicy = parserUtil.getRetentionPolicyType(fsName); + prop.setRetentionPolicy(retentionPolicy); + log.debug("VFS({}).Properties.RetentionPolicy = '{}'", fsName, retentionPolicy); + + String unitType = parserUtil.getNearlineSpaceUnitType(fsName); + long nearLineSize = parserUtil.getNearlineSpaceSize(fsName); + prop.setTotalNearlineSize(unitType, nearLineSize); + log.debug("VFS({}).Properties.NearlineSpaceSize = 
'{} {}'", fsName, nearLineSize, unitType); + + unitType = parserUtil.getOnlineSpaceUnitType(fsName); + long onlineSize = parserUtil.getOnlineSpaceSize(fsName); + prop.setTotalOnlineSize(unitType, onlineSize); + log.debug("VFS({}).Properties.OnlineSpaceSize = '{} {}'", fsName, onlineSize, unitType); + + boolean hasLimitedSize = parserUtil.getOnlineSpaceLimitedSize(fsName); + prop.setLimitedSize(hasLimitedSize); + log.debug("VFS({}).Properties.OnlineSpaceLimitedSize = '{}'", fsName, hasLimitedSize); + + return prop; + } + + // ******************* CAPABILITY *************************** + + private Capability buildCapabilities(String fsName) throws NamespaceException { + + /** + * ACL MODE ELEMENT + */ + ACLMode aclMode = ACLMode.makeFromString(parserUtil.getACLMode(fsName)); + Capability cap = new Capability(aclMode); + log.debug("VFS({}).Capabilities.aclMode = '{}'", fsName, aclMode); + + /** + * DEFAULT ACL + */ + boolean defaultACLDefined = parserUtil.getDefaultACLDefined(fsName); + log.debug("VFS({}).Capabilities.defaultACL [Defined?] 
= {}", fsName, defaultACLDefined); + if (defaultACLDefined) { + int nrACLEntries = parserUtil.getNumberOfACL(fsName); + String groupName = null; + String filePermString = null; + ACLEntry aclEntry = null; + for (int entryNumber = 0; entryNumber < nrACLEntries; entryNumber++) { + groupName = parserUtil.getGroupName(fsName, entryNumber); + filePermString = parserUtil.getPermissionString(fsName, entryNumber); + try { + aclEntry = new ACLEntry(groupName, filePermString); + cap.addACLEntry(aclEntry); + } catch (PermissionException permEx) { + log.error("Namespace XML Parser -- ERROR -- : {}", permEx.getMessage()); + } + } + log.debug("VFS({}).Capabilities.defaultACL = {}", fsName, cap.getDefaultACL()); + } + + /** + * QUOTA ELEMENT + */ + boolean quotaDefined = parserUtil.getQuotaDefined(fsName); + Quota quota = null; + if (quotaDefined) { + boolean quotaEnabled = parserUtil.getQuotaEnabled(fsName); + String device = parserUtil.getQuotaDevice(fsName); + + QuotaType quotaType; + String quotaValue = null; + + if (parserUtil.getQuotaFilesetDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.FILESET); + quotaValue = parserUtil.getQuotaFileset(fsName); + } else { + if (parserUtil.getQuotaGroupIDDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.GRP); + quotaValue = parserUtil.getQuotaGroupID(fsName); + } else { + if (parserUtil.getQuotaUserIDDefined(fsName)) { + quotaType = QuotaType.buildQuotaType(QuotaType.USR); + quotaValue = parserUtil.getQuotaUserID(fsName); + } else { + quotaType = QuotaType.buildQuotaType(QuotaType.UNKNOWN); + quotaValue = "unknown"; + } + } + } + + quotaType.setValue(quotaValue); + quota = new Quota(quotaEnabled, device, quotaType); + + } else { + quota = new Quota(); + } + cap.setQuota(quota); + + log.debug("VFS({}).Capabilities.quota = '{}'", fsName, quota); + + /** + * TRANSFER PROTOCOL + */ + int nrProtocols = parserUtil.getNumberOfProt(fsName); + for (int protCounter = 0; protCounter < nrProtocols; protCounter++) 
{ + int protocolIndex = parserUtil.getProtId(fsName, protCounter); + String name = parserUtil.getProtName(fsName, protCounter); + String schema = parserUtil.getProtSchema(fsName, protCounter); + Protocol protocol = Protocol.getProtocol(schema); + protocol.setProtocolServiceName(name); + String serviceHostName = parserUtil.getProtHost(fsName, protCounter); + String servicePortValue = parserUtil.getProtPort(fsName, protCounter); + int portIntValue = -1; + Authority service = null; + if (servicePortValue != null) { + try { + portIntValue = Integer.parseInt(servicePortValue); + service = new Authority(serviceHostName, portIntValue); + } catch (NumberFormatException nfe) { + log.warn("to evaluate the environmental variable " + servicePortValue); + } + } else { + service = new Authority(serviceHostName); + } + TransportProtocol transportProt = new TransportProtocol(protocol, service); + transportProt.setProtocolID(protocolIndex); + log.debug("VFS({}).Capabilities.protocol({}) = '{}'", fsName, protCounter, transportProt); + cap.addTransportProtocolByScheme(protocol, transportProt); + cap.addTransportProtocol(transportProt); + if (protocolIndex != -1) { + cap.addTransportProtocolByID(protocolIndex, transportProt); + } + + } + + /** + * PROTOCOL POOL + */ + int nrPools = parserUtil.getNumberOfPool(fsName); + if (nrPools > 0) { + + for (int poolCounter = 0; poolCounter < nrPools; poolCounter++) { + BalancingStrategyType balanceStrategy = + BalancingStrategyType.getByValue(parserUtil.getBalancerStrategy(fsName, poolCounter)); + List poolMembers = Lists.newArrayList(); + int nrMembers = parserUtil.getNumberOfPoolMembers(fsName, poolCounter); + for (int i = 0; i < nrMembers; i++) { + int protIndex = parserUtil.getMemberID(fsName, poolCounter, i); + TransportProtocol tProtMember = cap.getProtocolByID(protIndex); + if (tProtMember != null) { + PoolMember poolMember; + if (balanceStrategy.requireWeight()) { + int memberWeight = parserUtil.getMemberWeight(fsName, poolCounter, i); + 
poolMember = new PoolMember(protIndex, tProtMember, memberWeight); + } else { + poolMember = new PoolMember(protIndex, tProtMember); + } + poolMembers.add(poolMember); + } else { // member pointed out doesn't exist!! + String errorMessage = String.format( + "POOL Building: Protocol with index %d does not exists in the VFS : %s", protIndex, + fsName); + log.error(errorMessage); + throw new NamespaceException(errorMessage); + } + } + verifyPoolIsValid(poolMembers); + Protocol poolProtocol = poolMembers.get(0).getMemberProtocol().getProtocol(); + log.debug("Defined pool for protocol {} with size {}", poolProtocol, poolMembers.size()); + ProtocolPool pool = new ProtocolPool(balanceStrategy, poolMembers); + cap.addProtocolPool(pool); + cap.addProtocolPoolBySchema(poolProtocol, pool); + log.debug("PROTOCOL POOL: {}", cap.getPoolByScheme(poolProtocol)); + } + } else { + log.debug("Pool is not defined in VFS {}", fsName); + } + + return cap; + } + + /** + * @param poolMembers + * @throws NamespaceException + */ + private void verifyPoolIsValid(List poolMembers) throws NamespaceException { + + if (poolMembers.isEmpty()) { + throw new NamespaceException("POOL Defined is EMPTY!"); + } + Protocol prot = poolMembers.get(0).getMemberProtocol().getProtocol(); + for (PoolMember member : poolMembers) { + if (!(member.getMemberProtocol().getProtocol().equals(prot))) { + throw new NamespaceException("Defined Pool is NOT HOMOGENEOUS! Protocols " + prot.toString() + + " and " + member.toString() + " differs"); + } + } + } + + // ******************* DEFAULT VALUES *************************** + + private DefaultValuesInterface buildDefaultValues(String fsName) throws NamespaceException { + + DefaultValues def = new DefaultValues(); + if (parserUtil.isDefaultElementPresent(fsName)) { + setSpaceDef(fsName, def); + setFileDef(fsName, def); + } else { // Produce Default Values with default values :o ! + log.debug("VFS({}).DefaultValues is ABSENT. 
Using DEFAULT values.", fsName); + } + return def; + } + + private void setSpaceDef(String fsName, DefaultValues def) throws NamespaceException { + + String spaceType = parserUtil.getDefaultSpaceType(fsName); + log.debug("VFS({}).DefaultValues.space.type = '{}'", fsName, spaceType); + long lifeTime = parserUtil.getDefaultSpaceLifeTime(fsName); + log.debug("VFS({}).DefaultValues.space.lifeTime = ''", fsName, lifeTime); + long guarSize = parserUtil.getDefaultSpaceGuarSize(fsName); + log.debug("VFS({}).DefaultValues.space.guarSize = '{}'", fsName, guarSize); + long totSize = parserUtil.getDefaultSpaceTotSize(fsName); + log.debug("VFS({}).DefaultValues.space.totSize = '{}'", fsName, totSize); + def.setSpaceDefaults(spaceType, lifeTime, guarSize, totSize); + } + + private void setFileDef(String fsName, DefaultValues def) throws NamespaceException { + + String fileType = parserUtil.getDefaultFileType(fsName); + log.debug("VFS({}).DefaultValues.file.type = '{}'", fsName, fileType); + long lifeTime = parserUtil.getDefaultFileLifeTime(fsName); + log.debug("VFS({}).DefaultValues.file.lifeTime = '{}'", fsName, lifeTime); + def.setFileDefaults(fileType, lifeTime); + } + + // ******************* MAPPING RULE *************************** + + private void buildMapRules() throws NamespaceException { + + int numOfMapRules = parserUtil.getNumberOfMappingRule(); + String ruleName; + String stfnRoot; + String mappedFS; + MappingRule mapRule; + + for (int i = 0; i < numOfMapRules; i++) { + ruleName = parserUtil.getMapRuleName(i); + mappedFS = parserUtil.getMapRule_mappedFS(ruleName); + // Adding mapping rule to VFS within vfss; + if (vfss.containsKey(mappedFS)) { + log.debug("VFS '{}' pointed by RULE : '{}' exists.", mappedFS, ruleName); + stfnRoot = parserUtil.getMapRule_StFNRoot(ruleName); + VirtualFS vfs = vfss.get(mappedFS); + mapRule = new MappingRule(ruleName, stfnRoot, vfs); + ((VirtualFS) vfs).addMappingRule(mapRule); + maprules.put(ruleName, mapRule); + } else { + 
log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", mappedFS, ruleName); + } + } + } + + // ******************* APPROACHABLE RULE *************************** + + private void buildAppRules() throws NamespaceException { + + int numOfAppRules = parserUtil.getNumberOfApproachRule(); + + String ruleName; + String dn; + String vo_name; + String relPath; + String anonymousHttpReadString; + List appFSList; + ApproachableRule appRule; + + log.debug("Number of APP Rule : {}", numOfAppRules); + + + for (int i = 0; i < numOfAppRules; i++) { + ruleName = parserUtil.getApproachRuleName(i); + log.debug(" APP rule nr: {} is named : {}", i, ruleName); + + dn = parserUtil.getAppRule_SubjectDN(ruleName); + vo_name = parserUtil.getAppRule_SubjectVO(ruleName); + SubjectRules subjectRules = new SubjectRules(dn, vo_name); + + relPath = parserUtil.getAppRule_RelativePath(ruleName); + + anonymousHttpReadString = parserUtil.getAppRule_AnonymousHttpRead(ruleName); + if (anonymousHttpReadString != null && !anonymousHttpReadString.trim().isEmpty()) { + appRule = new ApproachableRule(ruleName, subjectRules, relPath, + Boolean.parseBoolean(anonymousHttpReadString)); + } else { + appRule = new ApproachableRule(ruleName, subjectRules, relPath); + } + + appFSList = parserUtil.getAppRule_AppFS(ruleName); + for (String appFS : appFSList) { + if (vfss.containsKey(appFS)) { + log.debug("VFS '{}' pointed by RULE : '{}' exists.", appFS, ruleName); + VirtualFS vfs = vfss.get(appFS); + ((VirtualFS) vfs).addApproachableRule(appRule); + appRule.addApproachableVFS(vfs); + } else { + log.error("VFS '{}' pointed by RULE : '{}' DOES NOT EXISTS.", appFS, ruleName); + } + } + apprules.put(ruleName, appRule); + } + } + + /***************************************************************************** + * BUSINESS METHODs + ****************************************************************************/ + + public String getNamespaceVersion() { + + return version; + } + + public List getAllVFS_Roots() { + + 
Collection elem = vfss.values(); + List roots = new ArrayList<>(vfss.size()); + Iterator scan = elem.iterator(); + while (scan.hasNext()) { + String root = null; + root = scan.next().getRootPath(); + roots.add(root); + } + return roots; + } + + public Map getMapVFS_Root() { + + Map result = new HashMap<>(); + Collection elem = vfss.values(); + Iterator scan = elem.iterator(); + while (scan.hasNext()) { + String root = null; + VirtualFS vfs = scan.next(); + root = vfs.getRootPath(); + result.put(root, vfs); + } + return result; + } + + public List getAllMappingRule_StFNRoots() { + + Collection elem = maprules.values(); + List roots = new ArrayList<>(maprules.size()); + Iterator scan = elem.iterator(); + String root = null; + while (scan.hasNext()) { + root = scan.next().getStFNRoot(); + roots.add(root); + } + return roots; + } + + public Map getMappingRuleMAP() { + + Map map = new HashMap<>(); + Collection elem = maprules.values(); + Iterator scan = elem.iterator(); + String root = null; + String name = null; + MappingRule rule; + while (scan.hasNext()) { + rule = scan.next(); + root = rule.getStFNRoot(); + name = rule.getRuleName(); + map.put(name, root); + } + return map; + } + + public VirtualFS getVFS(String vfsName) { + + return vfss.get(vfsName); + } } diff --git a/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java b/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java index e25522346..715cac617 100644 --- a/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java +++ b/src/main/java/it/grid/storm/namespace/config/xml/XMLParserUtil.java @@ -1,13 +1,22 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. */ -package it.grid.storm.namespace.config.xml; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.SAAuthzType; +package it.grid.storm.namespace.config.xml; -import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.List; @@ -21,1179 +30,905 @@ import com.google.common.collect.Lists; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.SAAuthzType; + public class XMLParserUtil implements XMLConst { - private final HierarchicalConfiguration configuration; - private final Logger log = LoggerFactory.getLogger(XMLParserUtil.class); - - public XMLParserUtil(Configuration config) { - - configuration = (HierarchicalConfiguration) config; - } - - /***************************************************************************** - * GENERICS METHODS - */ - - public boolean validateXML() { - - return true; - } - - public boolean areThereSustitutionCharInside(String element) { - - boolean result = false; - result = (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) - || (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1); - return result; - } - - public char whicSubstitutionChar(String element) { - - if (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) { - return XMLConst.PROT_SUB_PATTERN; - } else if (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) { - return XMLConst.FS_SUB_PATTERN; - } else if (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) { - return APPRULE_SUB_PATTERN; - } else if (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) { - return XMLConst.MAP_SUB_PATTERN; - } else if (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1) { - return XMLConst.ACL_ENTRY_SUB_PATTERN; - } else if (element.indexOf(XMLConst.MEMBER_SUB_PATTERN) != -1) { - return XMLConst.MEMBER_SUB_PATTERN; - } - return ' '; - } - - /***************************************************************************** - * FILESYSTEMS METHODS - */ - public String getNamespaceVersion() throws NamespaceException { - - String result = null; - result = getStringProperty(XMLConst.NAMESPACE_VERSION); - return result; - } - - public 
String getFSSpaceTokenDescription(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_SPACE_TOKEN_DESCRIPTION)); - return result; - } - - /** - * public String getAuthorizationSource(String nameOfFS) throws - * NamespaceException { int numOfFS = retrieveNumberByName(nameOfFS, - * XMLConst.FS_BY_NAME); String result = null; //Optional element if - * (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_AUTHZ))) { - * result = getStringProperty(substituteNumberInFSElement(numOfFS, - * XMLConst.FS_AUTHZ)); } else { //Default value needed. result = - * XMLConst.DEFAULT_AUTHZ_SOURCE; - * log.debug("AuthZ source for VFS(+'"+nameOfFS+ - * "') is absent. Default value ('"+result+"') will be used."); } return - * result; } - **/ - - /** - * public boolean getQuotaCheck(String nameOfFS) throws NamespaceException { - * int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); boolean - * result = false; //Optional element if - * (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_CHECK))) { - * result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - * XMLConst.QUOTA_CHECK)); } else { //Default value needed. result = - * XMLConst.DEFAULT_CHECKING_QUOTA; - * log.debug("Checking quota flag in VFS(+'"+nameOfFS - * +"') is absent. 
Default value ('"+result+"') will be used."); } return - * result; } - **/ - - public String getRetentionPolicyType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.RETENTION_POLICY)); - return result; - } - - public String getAccessLatencyType(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ACCESS_LATENCY)); - return result; - } - - public String getExpirationModeType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.EXPIRATION_MODE)); - return result; - } - - public String getOnlineSpaceUnitType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = null; - // Optional element - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE_UNIT))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE_UNIT)); - } else { // Default value needed. - result = XMLConst.DEFAULT_UNIT_TYPE; - log.debug("Online Space Unit type for VFS(+'" + nameOfFS - + "') is absent. 
Default value ('" + result + "') will be used"); - } - return result; - } - - public long getOnlineSpaceSize(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - long result = getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ONLINE_SIZE)); - return result; - } - - public String getNearlineSpaceUnitType(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = null; - // Optional element - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE_UNIT))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE_UNIT)); - } else { // Default value needed. - result = XMLConst.DEFAULT_UNIT_TYPE; - log.debug("Online Space Unit type for VFS(+'" + nameOfFS - + "') is absent. Default value ('" + result + "') will be used"); - } - return result; - } - - public long getNearlineSpaceSize(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - long result = getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.NEARLINE_SIZE)); - return result; - } - - public int getNumberOfFS() throws NamespaceException { - - return getPropertyNumber(XMLConst.FS_COUNTING); - } - - public String getFSName(int numOfFS) throws NamespaceException { - - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FILESYSTEM_NAME)); - } - - public int getFSNumber(String nameOfFS) throws NamespaceException { - - return retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - } - - public String getFSType(String nameOfFS) throws NamespaceException { - - // log.debug("-----FSTYPE------START"); - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - // log.debug("-----FSTYPE------END"); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FILESYSTEM_TYPE)); - } - - public String 
getFSRoot(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_ROOT)); - // log.debug("VFS ROOT = "+result); - return result; - } - - public String getFSDriver(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_DRIVER)); - } - - public String getSpaceDriver(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_SPACE_DRIVER)); - } - - public boolean isDefaultElementPresent(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - // FS_DEFAULTVALUES - result = isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.FS_DEFAULTVALUES)); - return result; - } - - public String getDefaultSpaceType(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_TYPE)); - } + private final HierarchicalConfiguration configuration; + private final Logger log = LoggerFactory.getLogger(XMLParserUtil.class); + + public XMLParserUtil(Configuration config) { + + configuration = (HierarchicalConfiguration) config; + } + + public boolean validateXML() { + + return true; + } + + public boolean areThereSustitutionCharInside(String element) { + + boolean result = false; + result = (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) + || (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) + || 
(element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1); + return result; + } + + public char whicSubstitutionChar(String element) { + + if (element.indexOf(XMLConst.PROT_SUB_PATTERN) != -1) { + return XMLConst.PROT_SUB_PATTERN; + } else if (element.indexOf(XMLConst.FS_SUB_PATTERN) != -1) { + return XMLConst.FS_SUB_PATTERN; + } else if (element.indexOf(XMLConst.APPRULE_SUB_PATTERN) != -1) { + return APPRULE_SUB_PATTERN; + } else if (element.indexOf(XMLConst.MAP_SUB_PATTERN) != -1) { + return XMLConst.MAP_SUB_PATTERN; + } else if (element.indexOf(XMLConst.ACL_ENTRY_SUB_PATTERN) != -1) { + return XMLConst.ACL_ENTRY_SUB_PATTERN; + } else if (element.indexOf(XMLConst.MEMBER_SUB_PATTERN) != -1) { + return XMLConst.MEMBER_SUB_PATTERN; + } + return ' '; + } + + public String getNamespaceVersion() throws NamespaceException { + + String result = null; + result = getStringProperty(XMLConst.NAMESPACE_VERSION); + return result; + } + + public String getFSSpaceTokenDescription(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = getStringProperty( + substituteNumberInFSElement(numOfFS, XMLConst.FS_SPACE_TOKEN_DESCRIPTION)); + return result; + } + + public String getRetentionPolicyType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.RETENTION_POLICY)); + return result; + } + + public String getAccessLatencyType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ACCESS_LATENCY)); + return result; + } + + public String getExpirationModeType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = + 
getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.EXPIRATION_MODE)); + return result; + } + + public String getOnlineSpaceUnitType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = null; + // Optional element + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE_UNIT))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE_UNIT)); + } else { // Default value needed. + result = XMLConst.DEFAULT_UNIT_TYPE; + log.debug("Online Space Unit type for VFS(+'" + nameOfFS + "') is absent. Default value ('" + + result + "') will be used"); + } + return result; + } + + public long getOnlineSpaceSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + long result = getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.ONLINE_SIZE)); + return result; + } + + public String getNearlineSpaceUnitType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String result = null; + // Optional element + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE_UNIT))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE_UNIT)); + } else { // Default value needed. + result = XMLConst.DEFAULT_UNIT_TYPE; + log.debug("Online Space Unit type for VFS(+'" + nameOfFS + "') is absent. 
Default value ('" + + result + "') will be used"); + } + return result; + } + + public long getNearlineSpaceSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + long result = getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.NEARLINE_SIZE)); + return result; + } + + public int getNumberOfFS() throws NamespaceException { + + return getPropertyNumber(XMLConst.FS_COUNTING); + } + + public String getFSName(int numOfFS) throws NamespaceException { + + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FILESYSTEM_NAME)); + } + + public int getFSNumber(String nameOfFS) throws NamespaceException { + + return retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + } + + public String getFSType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FILESYSTEM_TYPE)); + } + + public String getFSRoot(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_ROOT)); + } + + public String getFSDriver(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_DRIVER)); + } + + public String getSpaceDriver(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_SPACE_DRIVER)); + } + + public boolean isDefaultElementPresent(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_DEFAULTVALUES)); + } + + public String 
getDefaultSpaceType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_TYPE)); + } + + public long getDefaultSpaceLifeTime(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_LT)); + } + + public long getDefaultSpaceGuarSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_GUARSIZE)); + } + + public long getDefaultSpaceTotSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_SPACE_TOTSIZE)); + } + + public String getDefaultFileType(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_FILE_TYPE)); + } + + public long getDefaultFileLifeTime(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getLongProperty(substituteNumberInFSElement(numOfFS, XMLConst.DEF_FILE_LT)); + } + + public String getACLMode(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.ACL_MODE)); + } + + public int getNumberOfProt(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String protCount = + 
substitutionNumber(XMLConst.PROTOCOL_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + return getPropertyNumber(protCount); + } + + public String getProtName(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROTOCOL_NAME)); + } + + public int getProtNumberByName(String nameOfFS, String nameOfProt) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + String collElem = substituteNumberInFSElement(numFS, XMLConst.PROTOCOL_BY_NAME); + return retrieveNumberByName(nameOfProt, collElem); + } + + public String getProtSchema(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_SCHEMA)); + } + + public String getProtHost(String nameOfFS, int numOfProt) throws NamespaceException { + + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_HOST)); + } + + public String getProtPort(String nameOfFS, int numOfProt) throws NamespaceException { - public long getDefaultSpaceLifeTime(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_LT)); - } + return getStringProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_PORT)); + } - public long getDefaultSpaceGuarSize(String nameOfFS) - throws NamespaceException { + /* + * MAPPING RULES METHODS + */ - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_GUARSIZE)); - } + public int getNumberOfMappingRule() throws NamespaceException { - public long getDefaultSpaceTotSize(String nameOfFS) throws NamespaceException { + return getPropertyNumber(XMLConst.MAP_RULE_COUNTING); + } - int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_SPACE_TOTSIZE)); - } + public String getMapRuleName(int numOfMapRule) throws NamespaceException { - public String getDefaultFileType(String nameOfFS) throws NamespaceException { + return getStringProperty(substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_NAME)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_FILE_TYPE)); - } + public String getMapRule_StFNRoot(String nameOfMapRule) throws NamespaceException { - public long getDefaultFileLifeTime(String nameOfFS) throws NamespaceException { + int numOfMapRule = retrieveNumberByName(nameOfMapRule, XMLConst.MAP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_STFNROOT)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getLongProperty(substituteNumberInFSElement(numOfFS, - XMLConst.DEF_FILE_LT)); - } + public String getMapRule_mappedFS(String nameOfMapRule) throws NamespaceException { - public String getACLMode(String nameOfFS) throws NamespaceException { + int numOfMapRule = retrieveNumberByName(nameOfMapRule, XMLConst.MAP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInMAPElement(numOfMapRule, XMLConst.MAP_RULE_MAPPED_FS)); + } - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.ACL_MODE)); - } + /* + * APPROACHING METHODS + */ - public int getNumberOfProt(String nameOfFS) throws NamespaceException { + public int getNumberOfApproachRule() throws NamespaceException { - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String protCount = 
substitutionNumber(XMLConst.PROTOCOL_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - // log.debug( configuration.getString(protCount)); - return getPropertyNumber(protCount); - } + return getPropertyNumber(XMLConst.APP_RULE_COUNTING); + } - public String getProtName(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getApproachRuleName(int numOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROTOCOL_NAME)); - } + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_RULE_NAME)); + } - public int getProtNumberByName(String nameOfFS, String nameOfProt) - throws NamespaceException { + public String getAppRule_SubjectDN(String nameOfAppRule) throws NamespaceException { - int numFS = getFSNumber(nameOfFS); - String collElem = substituteNumberInFSElement(numFS, - XMLConst.PROTOCOL_BY_NAME); - // log.debug("COLLECTION = "+collElem); - return retrieveNumberByName(nameOfProt, collElem); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_DN)); + } - public String getProtSchema(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getAppRule_SubjectVO(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_SCHEMA)); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty(substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_VO_NAME)); + } - public String getProtHost(String nameOfFS, int numOfProt) - throws NamespaceException { + public List getAppRule_AppFS(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_HOST)); - } + int numOfAppRule = 
retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getListValue(substituteNumberInAPPElement(numOfAppRule, XMLConst.APPROACHABLE_FS)); + } - public String getProtPort(String nameOfFS, int numOfProt) - throws NamespaceException { + public String getAppRule_RelativePath(String nameOfAppRule) throws NamespaceException { - return getStringProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_PORT)); - } + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_SPACE_REL_PATH)); + } - /***************************************************************************** - * MAPPING RULES METHODS - */ + public String getAppRule_AnonymousHttpRead(String nameOfAppRule) throws NamespaceException { - public int getNumberOfMappingRule() throws NamespaceException { + int numOfAppRule = retrieveNumberByName(nameOfAppRule, XMLConst.APP_RULE_BY_NAME); + return getStringProperty( + substituteNumberInAPPElement(numOfAppRule, XMLConst.APP_ANONYMOUS_HTTP_READ)); + } - return getPropertyNumber(XMLConst.MAP_RULE_COUNTING); - } + /* + * QUOTA METHODS + */ - public String getMapRuleName(int numOfMapRule) throws NamespaceException { + public boolean getQuotaDefined(String nameOfFS) throws NamespaceException { - return getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_NAME)); - } + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED))) { + result = true; + } + return result; + } - public String getMapRule_StFNRoot(String nameOfMapRule) - throws NamespaceException { + public boolean getQuotaEnabled(String nameOfFS) throws NamespaceException { - int numOfMapRule = retrieveNumberByName(nameOfMapRule, - XMLConst.MAP_RULE_BY_NAME); - return 
getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_STFNROOT)); - } - - public String getMapRule_mappedFS(String nameOfMapRule) - throws NamespaceException { + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + result = getBooleanProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED)); + return result; + } - int numOfMapRule = retrieveNumberByName(nameOfMapRule, - XMLConst.MAP_RULE_BY_NAME); - return getStringProperty(substituteNumberInMAPElement(numOfMapRule, - XMLConst.MAP_RULE_MAPPED_FS)); - } - - /***************************************************************************** - * APPROACHING METHODS - */ - - public int getNumberOfApproachRule() throws NamespaceException { - - return getPropertyNumber(XMLConst.APP_RULE_COUNTING); - } - - public String getApproachRuleName(int numOfAppRule) throws NamespaceException { - - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_RULE_NAME)); - } - - public String getAppRule_SubjectDN(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_DN)); - } - - public String getAppRule_SubjectVO(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_VO_NAME)); - } - - public List getAppRule_AppFS(String nameOfAppRule) throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getListValue(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APPROACHABLE_FS)); - } - - public String getAppRule_RelativePath(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, 
- XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_SPACE_REL_PATH)); - } - - public String getAppRule_AnonymousHttpRead(String nameOfAppRule) - throws NamespaceException { - - int numOfAppRule = retrieveNumberByName(nameOfAppRule, - XMLConst.APP_RULE_BY_NAME); - return getStringProperty(substituteNumberInAPPElement(numOfAppRule, - XMLConst.APP_ANONYMOUS_HTTP_READ)); - } - - /***************************************************************************** - * QUOTA METHODS - */ - - public boolean getQuotaDefined(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_ENABLED))) { - result = true; - } - return result; - } - - public boolean getQuotaEnabled(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_ENABLED)); - return result; - } - - public boolean getQuotaDeviceDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { - result = true; - } - return result; - } - - public String getQuotaDevice(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_DEVICE)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_DEVICE + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean 
getQuotaFilesetDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME))) { - result = true; - } - return result; - } - - public String getQuotaFileset(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_FILE_SET_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_FILE_SET_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean getQuotaGroupIDDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME))) { - result = true; - } - return result; - } - - public String getQuotaGroupID(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_GROUP_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_GROUP_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public boolean getQuotaUserIDDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { - result = true; - } - return result; - } - - public String 
getQuotaUserID(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.QUOTA_USER_NAME)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.QUOTA_USER_NAME + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - /***************************************************************************** - * STORAGE CLASS METHODs - */ - public String getStorageClass(String nameOfFS) throws NamespaceException { - - String result = XMLConst.DEFAULT_STORAGE_CLASS; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.FS_STORAGE_CLASS))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.FS_STORAGE_CLASS)); - } else { - log.debug("Storage Class for VFS(+'" + nameOfFS - + "') is absent. 
Default value ('" + result + "') will be used."); - } - return result; - } - - /***************************************************************************** - * PRIVATE METHOD - *****************************************************************************/ - private String substitutionNumber(String xpath, char patternChar, int number) { - - int startIndex = 0; - int pos = 0; - StringBuilder result = new StringBuilder(); - pos = xpath.indexOf(patternChar, startIndex); - String numStr = Integer.toString(number); - result.append(xpath.substring(startIndex, pos)); - result.append(numStr); - result.append(xpath.substring(pos + 1)); - return result.toString(); - } - - private String substituteNumberInFSElement(int numberOfFS, String element) - throws NamespaceException { - - int numFS = getNumberOfFS(); - if (numberOfFS > numFS) { - throw new NamespaceException("Invalid pointing of Virtual File system"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numberOfFS); - return new_element; - } - - private String substituteNumberInACLEntryElement(String nameOfFS, - int numberOfACLEntry, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numACL = getNumberOfACL(nameOfFS); - if (numberOfACLEntry > numACL) { - throw new NamespaceException("Invalid pointing of ACL Entry within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, - XMLConst.ACL_ENTRY_SUB_PATTERN, numberOfACLEntry); - return new_element; - } - - private String substituteNumberInProtocolElement(String nameOfFS, - int numberOfProtocol, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int 
numProt = getNumberOfProt(nameOfFS); - if (numberOfProtocol > numProt) { - throw new NamespaceException("Invalid pointing of Protocol within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.PROT_SUB_PATTERN, - numberOfProtocol); - return new_element; - } - - private String substituteNumberInPoolElement(String nameOfFS, - int numberOfPool, String element) throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numPool = getNumberOfPool(nameOfFS); - if (numberOfPool > numPool) { - throw new NamespaceException("Invalid pointing of Pool within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, - numberOfPool); - return new_element; - } - - private String substituteNumberInMembersElement(String nameOfFS, - int numOfPool, int numberOfMember, String element) - throws NamespaceException { - - int numFS = getFSNumber(nameOfFS); - if (numFS == -1) { - throw new NamespaceException("Virtual File system (" + nameOfFS - + ") does not exists"); - } - int numMembers = getNumberOfPoolMembers(nameOfFS, numOfPool); - if (numberOfMember > numMembers) { - throw new NamespaceException("Invalid pointing of Member within VFS"); - } - String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, - numFS); - new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, - numOfPool); - new_element = substitutionNumber(new_element, XMLConst.MEMBER_SUB_PATTERN, - numberOfMember); - return new_element; - } - - private String substituteNumberInMAPElement(int numberOfMapRule, - String element) throws NamespaceException { - - int numMapRule = getNumberOfMappingRule(); - - if (numberOfMapRule > numMapRule) { - throw new 
NamespaceException("Invalid pointing of Mapping Rule"); - } - String new_element = substitutionNumber(element, XMLConst.MAP_SUB_PATTERN, - numberOfMapRule); - return new_element; - } - - private String substituteNumberInAPPElement(int numberOfAppRule, - String element) throws NamespaceException { - - int numAppRule = getNumberOfApproachRule(); - if (numberOfAppRule > numAppRule) { - throw new NamespaceException("Invalid pointing of Approachable Rule"); - } - String new_element = substitutionNumber(element, - XMLConst.APPRULE_SUB_PATTERN, numberOfAppRule); - return new_element; - } - - private int retrieveNumberByName(String name, String collectionElement, - boolean logging) { - - int result = -1; - int size = -1; - // log.debug(" NAME : "+name+" | Collection Element :"+collectionElement); - List prop = configuration.getList(collectionElement); - if (prop != null) { - size = prop.size(); - // log.debug("Size = "+size); - if (logging) { - for (int i = 0; i < size; i++) { - log.debug(prop.get(i).toString()); - } - } - result = prop.indexOf(name); - } else { - log.warn("[retrieveNumberByName_3] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return result; - } - - private int retrieveNumberByName(String name, String collectionElement) { - - int result = -1; - int size = -1; - // log.debug(" NAME : "+name+" | Collection Element :"+collectionElement); - List prop = configuration.getList(collectionElement); - if (prop != null) { - size = prop.size(); - result = prop.indexOf(name); - } else { - log.warn("[retrieveNumberByName_2] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return result; - } - - public Iterator getKeys() { - - return configuration.getKeys(); - } - - /** - * - * @param element - * String - * @return int - */ - private int getPropertyNumber(String element) { - - int result = -1; - Object prop = configuration.getProperty(element); - if (prop != null) { - result = 
1; // If it is not null its value is atleast '1'! - if (prop instanceof Collection) { - result = ((Collection) prop).size(); - } - } else { - log.warn("[getPropertyNumber] Element <" + element - + "> does not exists in namespace configuration file"); - } - - return result; - } - - private boolean isPresent(String element) { - - boolean result = false; - result = configuration.containsKey(element); - // log.debug("XMLPArserUtil: isPresent('"+element+"')="+result); - return result; - } - - /** - * - * @param element - * String - * @return int - */ - private String getStringProperty(String element) throws NamespaceException { - - String prop = null; - try { - prop = configuration.getString(element); - // log.debug("ELEMENT = "+element+" VALUE = "+prop); - } catch (ConversionException ce) { - log.warn("[getStringProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getStringProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return boolean - */ - private boolean getBooleanProperty(String element) throws NamespaceException { - - boolean result = false; - try { - result = configuration.getBoolean(element); - } catch (ConversionException ce) { - log.warn("[getLongProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getLongProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return result; - } - - /** - * - * @param element - * String - * @return int - */ - private long getLongProperty(String element) throws NamespaceException { - - long prop = -1L; - try { - prop = configuration.getLong(element); - } catch (ConversionException ce) { - log.warn("[getLongProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - 
log.warn("[getLongProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return int - */ - private int getIntProperty(String element) { - - int prop = -1; - try { - prop = configuration.getInt(element); - } catch (ConversionException ce) { - log.warn("[getIntProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getIntProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - return prop; - } - - /** - * - * @param element - * String - * @return int - */ - private String[] getListProperty(String element) throws NamespaceException { - - String prop = null; - try { - prop = configuration.getString(element); - } catch (ConversionException ce) { - log.warn("[getListProperty] Element <" + element - + "> does not contains a String value"); - } catch (NoSuchElementException note) { - log.warn("[getListProperty] Element <" + element - + "> does not exists in namespace configuration file"); - } - // log.debug("LIST : "+prop); - String[] result = prop.split(","); - // log.debug(" LIST lenght :"+result.length); - return result; - } - - private List getListValue(String collectionElement) { - - List propList = configuration.getList(collectionElement); - List prop = Lists.newArrayList(); - // For a set or list - for (Object element2 : propList) { - String element = (String) element2; - prop.add(element.trim()); - } - - log.debug("LIST - prop : " + prop); - log.debug("Nr. 
of elements : " + prop.size()); - if (prop.size() == 0) { - log.warn("[retrieveNumberByName_2] Element <" + collectionElement - + "> does not exists in namespace configuration file"); - } - return prop; - } - - public boolean getDefaultACLDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.GROUP_NAME))) { - result = true; - } - return result; - } - - public int getNumberOfACL(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String aclCount = substitutionNumber(XMLConst.ACL_ENTRY_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - log.debug("ACL Count = " + aclCount); - return getPropertyNumber(aclCount); - } - - public String getGroupName(String nameOfFS, int aclEntryNumber) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String aclCount = substitutionNumber(XMLConst.GROUP_NAME, - XMLConst.FS_SUB_PATTERN, numOfFS); - String result = null; - Object prop = configuration.getProperty(aclCount); - if (prop != null) { - if (prop instanceof Collection) { - ArrayList propList = new ArrayList((Collection) prop); - if (propList.size() > aclEntryNumber) { - result = propList.get(aclEntryNumber); - } - } else { - if (prop instanceof String) { - result = ((String) prop); - } - } - } else { - log.warn("[getPropertyNumber] Element <" + aclCount - + "> does not exists in namespace configuration file"); - } - return result; - // return getStringProperty(substituteNumberInACLEntryElement(nameOfFS, - // aclEntryNumber, XMLConst.GROUP_NAME)); - } - - public String getPermissionString(String nameOfFS, int aclEntryNumber) - throws NamespaceException { - - int numOfFS = 
retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - String aclCount = substitutionNumber(XMLConst.PERMISSIONS, - XMLConst.FS_SUB_PATTERN, numOfFS); - String result = null; - Object prop = configuration.getProperty(aclCount); - if (prop != null) { - if (prop instanceof Collection) { - ArrayList propList = new ArrayList((Collection) prop); - if (propList.size() > aclEntryNumber) { - result = propList.get(aclEntryNumber); - } - } else { - if (prop instanceof String) { - result = ((String) prop); - } - } - } else { - log.warn("[getPropertyNumber] Element <" + aclCount - + "> does not exists in namespace configuration file"); - } - return result; - - // return getStringProperty(substituteNumberInACLEntryElement(nameOfFS, - // aclEntryNumber, XMLConst.PERMISSIONS)); - } - - /** - * ********************************** VERSION 1.4.0 - ***************************************/ - - public String getStorageAreaAuthz(String nameOfFS, SAAuthzType type) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (type.equals(SAAuthzType.FIXED)) { - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.SA_AUTHZ_FIXED)); - } else { - return getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.SA_AUTHZ_DB)); - } - } - - public SAAuthzType getStorageAreaAuthzType(String nameOfFS) - throws NamespaceException { - - if (getStorageAreaAuthzFixedDefined(nameOfFS)) { - return SAAuthzType.FIXED; - } - if (getStorageAreaAuthzDBDefined(nameOfFS)) { - return SAAuthzType.AUTHZDB; - } - throw new NamespaceException("Unable to find the SAAuthzType in " - + nameOfFS); - } - - public boolean getStorageAreaAuthzFixedDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED))) { - result = true; - } - return result; - } - - public boolean 
getStorageAreaAuthzDBDefined(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB))) { - result = true; - } - return result; - } - - public int getProtId(String nameOfFS, int numOfProt) - throws NamespaceException { - - // int numOfProt = getProtNumberByName(nameOfFS, protName); - String protId = substituteNumberInProtocolElement(nameOfFS, numOfProt, - XMLConst.PROT_ID); - // log.debug("ProtID : "+protId); - if (isPresent(protId)) { - return getIntProperty(substituteNumberInProtocolElement(nameOfFS, - numOfProt, XMLConst.PROT_ID)); - } else { - return -1; - } - } - - public boolean getOnlineSpaceLimitedSize(String nameOfFS) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - result = getBooleanProperty(substituteNumberInFSElement(numOfFS, - XMLConst.LIMITED_SIZE)); - return result; - } - - public int getNumberOfPool(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - if (!getPoolDefined(nameOfFS)) - return 0; - String protCount = substitutionNumber(XMLConst.POOL_COUNTING, - XMLConst.FS_SUB_PATTERN, numOfFS); - return getPropertyNumber(protCount); - } - - public boolean getPoolDefined(String nameOfFS) throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - boolean result = false; - if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.POOL_COUNTING))) { - result = true; - } - return result; - } - - public String getBalancerStrategy(String nameOfFS) throws NamespaceException { - - String result = null; - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if 
(isPresent(substituteNumberInFSElement(numOfFS, - XMLConst.BALANCE_STRATEGY))) { - result = getStringProperty(substituteNumberInFSElement(numOfFS, - XMLConst.BALANCE_STRATEGY)); - } else { - throw new NamespaceException("Unable to find the element '" - + XMLConst.BALANCE_STRATEGY + "' for the VFS:'" + nameOfFS + "'"); - } - return result; - } - - public int getNumberOfPoolMembers(String nameOfFS, int poolCounter) - throws NamespaceException { - - int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); - if (numOfFS == -1) { - throw new NamespaceException("FS named '" + nameOfFS - + "' does not exist in config"); - } - String subTree = substituteNumberInPoolElement(nameOfFS, poolCounter, - XMLConst.POOL); - HierarchicalConfiguration sub = configuration.configurationAt(subTree); - Object members = sub.getProperty("members.member[@member-id]"); - int numOfMembers = -1; - if (members != null) { - if (members instanceof Collection) { - numOfMembers = ((Collection) members).size(); - } else { - numOfMembers = 1; - } - } else { - log.error("Error during the retrieve of the number of pool member of " - + nameOfFS); - } - return numOfMembers; - } - - public int getMemberID(String nameOfFS, int numOfPool, int memberNr) - throws NamespaceException { - - return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, - memberNr, XMLConst.POOL_MEMBER_ID)); - } - - public int getMemberWeight(String nameOfFS, int numOfPool, int memberNr) - throws NamespaceException { - - return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, - memberNr, XMLConst.POOL_MEMBER_WEIGHT)); - } - - public String getBalancerStrategy(String fsName, int poolCounter) - throws NamespaceException { - - String poolId = substituteNumberInPoolElement(fsName, poolCounter, - XMLConst.BALANCE_STRATEGY); - if (isPresent(poolId)) { - return getStringProperty(substituteNumberInPoolElement(fsName, - poolCounter, XMLConst.BALANCE_STRATEGY)); - } else { - throw new 
NamespaceException("Unable to find the element '" - + XMLConst.BALANCE_STRATEGY + "' for the VFS:'" + fsName + "'"); - } - } + public boolean getQuotaDeviceDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { + result = true; + } + return result; + } + + public String getQuotaDevice(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_DEVICE)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.QUOTA_DEVICE + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + public boolean getQuotaFilesetDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME)); + } + + public String getQuotaFileset(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (!isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME))) { + String errorMessage = String.format("Unable to find the element '%s' for the VFS:'%s'", + XMLConst.QUOTA_FILE_SET_NAME, nameOfFS); + throw new NamespaceException(errorMessage); + } + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_FILE_SET_NAME)); + } + + public boolean getQuotaGroupIDDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME)); + } + + public String getQuotaGroupID(String 
nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (!isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME))) { + String errorMessage = String.format("Unable to find the element '%s' for the VFS:'%s'", + XMLConst.QUOTA_GROUP_NAME, nameOfFS); + throw new NamespaceException(errorMessage); + } + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_GROUP_NAME)); + } + + public boolean getQuotaUserIDDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + return isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME)); + } + + public String getQuotaUserID(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.QUOTA_USER_NAME)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.QUOTA_USER_NAME + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + /* + * STORAGE CLASS METHODs + */ + public String getStorageClass(String nameOfFS) throws NamespaceException { + + String result = XMLConst.DEFAULT_STORAGE_CLASS; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.FS_STORAGE_CLASS))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.FS_STORAGE_CLASS)); + } else { + log.debug("Storage Class for VFS(+'" + nameOfFS + "') is absent. 
Default value ('" + result + + "') will be used."); + } + return result; + } + + /***************************************************************************** + * PRIVATE METHOD + *****************************************************************************/ + private String substitutionNumber(String xpath, char patternChar, int number) { + + int startIndex = 0; + int pos = 0; + StringBuilder result = new StringBuilder(); + pos = xpath.indexOf(patternChar, startIndex); + String numStr = Integer.toString(number); + result.append(xpath.substring(startIndex, pos)); + result.append(numStr); + result.append(xpath.substring(pos + 1)); + return result.toString(); + } + + private String substituteNumberInFSElement(int numberOfFS, String element) + throws NamespaceException { + + int numFS = getNumberOfFS(); + if (numberOfFS > numFS) { + throw new NamespaceException("Invalid pointing of Virtual File system"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numberOfFS); + return new_element; + } + + private String substituteNumberInProtocolElement(String nameOfFS, int numberOfProtocol, + String element) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numProt = getNumberOfProt(nameOfFS); + if (numberOfProtocol > numProt) { + throw new NamespaceException("Invalid pointing of Protocol within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.PROT_SUB_PATTERN, numberOfProtocol); + return new_element; + } + + private String substituteNumberInPoolElement(String nameOfFS, int numberOfPool, String element) + throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numPool = 
getNumberOfPool(nameOfFS); + if (numberOfPool > numPool) { + throw new NamespaceException("Invalid pointing of Pool within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, numberOfPool); + return new_element; + } + + private String substituteNumberInMembersElement(String nameOfFS, int numOfPool, + int numberOfMember, String element) throws NamespaceException { + + int numFS = getFSNumber(nameOfFS); + if (numFS == -1) { + throw new NamespaceException("Virtual File system (" + nameOfFS + ") does not exists"); + } + int numMembers = getNumberOfPoolMembers(nameOfFS, numOfPool); + if (numberOfMember > numMembers) { + throw new NamespaceException("Invalid pointing of Member within VFS"); + } + String new_element = substitutionNumber(element, XMLConst.FS_SUB_PATTERN, numFS); + new_element = substitutionNumber(new_element, XMLConst.POOL_SUB_PATTERN, numOfPool); + new_element = substitutionNumber(new_element, XMLConst.MEMBER_SUB_PATTERN, numberOfMember); + return new_element; + } + + private String substituteNumberInMAPElement(int numberOfMapRule, String element) + throws NamespaceException { + + int numMapRule = getNumberOfMappingRule(); + + if (numberOfMapRule > numMapRule) { + throw new NamespaceException("Invalid pointing of Mapping Rule"); + } + return substitutionNumber(element, XMLConst.MAP_SUB_PATTERN, numberOfMapRule); + } + + private String substituteNumberInAPPElement(int numberOfAppRule, String element) + throws NamespaceException { + + int numAppRule = getNumberOfApproachRule(); + if (numberOfAppRule > numAppRule) { + throw new NamespaceException("Invalid pointing of Approachable Rule"); + } + return substitutionNumber(element, XMLConst.APPRULE_SUB_PATTERN, numberOfAppRule); + } + + private int retrieveNumberByName(String name, String collectionElement) { + + int result = -1; + List prop = configuration.getList(collectionElement); + if (prop 
!= null) { + result = prop.indexOf(name); + } else { + log.warn( + "[retrieveNumberByName_2] Element <{}> does not exists in namespace configuration file", + collectionElement); + } + return result; + } + + public Iterator getKeys() { + + return configuration.getKeys(); + } + + /** + * + * @param element String + * @return int + */ + private int getPropertyNumber(String element) { + + int result = -1; + Object prop = configuration.getProperty(element); + if (prop != null) { + result = 1; // If it is not null its value is at least '1'! + if (prop instanceof Collection) { + result = ((Collection) prop).size(); + } + } else { + log.warn("[getPropertyNumber] Element <{}> does not exists in namespace configuration file", + element); + } + + return result; + } + + private boolean isPresent(String element) { + + return configuration.containsKey(element); + } + + /** + * + * @param element String + * @return int + */ + private String getStringProperty(String element) throws NamespaceException { + + String prop = null; + try { + prop = configuration.getString(element); + } catch (ConversionException ce) { + log.warn("[getStringProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getStringProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + /** + * + * @param element String + * @return boolean + */ + private boolean getBooleanProperty(String element) throws NamespaceException { + + boolean result = false; + try { + result = configuration.getBoolean(element); + } catch (ConversionException ce) { + log.warn("[getLongProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getLongProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return result; + } + + /** + * + * @param element String + * @return int + */ + private long getLongProperty(String 
element) throws NamespaceException { + + long prop = -1L; + try { + prop = configuration.getLong(element); + } catch (ConversionException ce) { + log.warn("[getLongProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getLongProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + /** + * + * @param element String + * @return int + */ + private int getIntProperty(String element) { + + int prop = -1; + try { + prop = configuration.getInt(element); + } catch (ConversionException ce) { + log.warn("[getIntProperty] Element <{}> does not contains a String value", element); + } catch (NoSuchElementException note) { + log.warn("[getIntProperty] Element <{}> does not exists in namespace configuration file", + element); + } + return prop; + } + + private List getListValue(String collectionElement) { + + List propList = configuration.getList(collectionElement); + List prop = Lists.newArrayList(); + // For a set or list + for (Object element2 : propList) { + String element = (String) element2; + prop.add(element.trim()); + } + + log.debug("LIST - prop : {}", prop); + log.debug("Nr. 
of elements : {}", prop.size()); + if (prop.size() == 0) { + log.warn( + "[retrieveNumberByName_2] Element <{}> does not exists in namespace configuration file", + collectionElement); + } + return prop; + } + + public boolean getDefaultACLDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.GROUP_NAME))) { + result = true; + } + return result; + } + + public int getNumberOfACL(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String aclCount = + substitutionNumber(XMLConst.ACL_ENTRY_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + log.debug("ACL Count = {}", aclCount); + return getPropertyNumber(aclCount); + } + + @SuppressWarnings("unchecked") + public String getGroupName(String nameOfFS, int aclEntryNumber) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String aclCount = substitutionNumber(XMLConst.GROUP_NAME, XMLConst.FS_SUB_PATTERN, numOfFS); + String result = null; + Object prop = configuration.getProperty(aclCount); + if (prop != null) { + if (prop instanceof List) { + List propList = Lists.newArrayList((List) prop); + if (propList.size() > aclEntryNumber) { + result = (String) propList.get(aclEntryNumber); + } + } else { + if (prop instanceof String) { + result = ((String) prop); + } + } + } else { + log.warn("[getPropertyNumber] Element <{}> does not exists in namespace configuration file", + aclCount); + } + return result; + } + + @SuppressWarnings("unchecked") + public String getPermissionString(String nameOfFS, int aclEntryNumber) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + String aclCount = 
substitutionNumber(XMLConst.PERMISSIONS, XMLConst.FS_SUB_PATTERN, numOfFS); + String result = null; + Object prop = configuration.getProperty(aclCount); + if (prop != null) { + if (prop instanceof List) { + List propList = Lists.newArrayList((List) prop); + if (propList.size() > aclEntryNumber) { + result = propList.get(aclEntryNumber); + } + } else { + if (prop instanceof String) { + result = ((String) prop); + } + } + } else { + log.warn("[getPropertyNumber] Element <" + aclCount + + "> does not exists in namespace configuration file"); + } + return result; + } + + public String getStorageAreaAuthz(String nameOfFS, SAAuthzType type) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (type.equals(SAAuthzType.FIXED)) { + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED)); + } else { + return getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB)); + } + } + + public SAAuthzType getStorageAreaAuthzType(String nameOfFS) throws NamespaceException { + + if (getStorageAreaAuthzFixedDefined(nameOfFS)) { + return SAAuthzType.FIXED; + } + if (getStorageAreaAuthzDBDefined(nameOfFS)) { + return SAAuthzType.AUTHZDB; + } + throw new NamespaceException("Unable to find the SAAuthzType in " + nameOfFS); + } + + public boolean getStorageAreaAuthzFixedDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_FIXED))) { + result = true; + } + return result; + } + + public boolean getStorageAreaAuthzDBDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.SA_AUTHZ_DB))) { + result = true; + } + return result; + } + + public int getProtId(String nameOfFS, int 
numOfProt) throws NamespaceException { + + String protId = substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_ID); + if (isPresent(protId)) { + return getIntProperty( + substituteNumberInProtocolElement(nameOfFS, numOfProt, XMLConst.PROT_ID)); + } else { + return -1; + } + } + + public boolean getOnlineSpaceLimitedSize(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + result = getBooleanProperty(substituteNumberInFSElement(numOfFS, XMLConst.LIMITED_SIZE)); + return result; + } + + public int getNumberOfPool(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + if (!getPoolDefined(nameOfFS)) + return 0; + String protCount = substitutionNumber(XMLConst.POOL_COUNTING, XMLConst.FS_SUB_PATTERN, numOfFS); + log.debug("Number of pools = {}", protCount); + return getPropertyNumber(protCount); + } + + public boolean getPoolDefined(String nameOfFS) throws NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + boolean result = false; + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.POOL_COUNTING))) { + result = true; + } + return result; + } + + public String getBalancerStrategy(String nameOfFS) throws NamespaceException { + + String result = null; + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (isPresent(substituteNumberInFSElement(numOfFS, XMLConst.BALANCE_STRATEGY))) { + result = getStringProperty(substituteNumberInFSElement(numOfFS, XMLConst.BALANCE_STRATEGY)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.BALANCE_STRATEGY + + "' for the VFS:'" + nameOfFS + "'"); + } + return result; + } + + public int getNumberOfPoolMembers(String nameOfFS, int poolCounter) throws 
NamespaceException { + + int numOfFS = retrieveNumberByName(nameOfFS, XMLConst.FS_BY_NAME); + if (numOfFS == -1) { + throw new NamespaceException("FS named '" + nameOfFS + "' does not exist in config"); + } + String subTree = substituteNumberInPoolElement(nameOfFS, poolCounter, XMLConst.POOL); + HierarchicalConfiguration sub = configuration.configurationAt(subTree); + Object members = sub.getProperty("members.member[@member-id]"); + int numOfMembers = -1; + if (members != null) { + if (members instanceof Collection) { + numOfMembers = ((Collection) members).size(); + } else { + numOfMembers = 1; + } + } else { + log.error("Error during the retrieve of the number of pool member of {}", nameOfFS); + } + return numOfMembers; + } + + public int getMemberID(String nameOfFS, int numOfPool, int memberNr) throws NamespaceException { + + return getIntProperty( + substituteNumberInMembersElement(nameOfFS, numOfPool, memberNr, XMLConst.POOL_MEMBER_ID)); + } + + public int getMemberWeight(String nameOfFS, int numOfPool, int memberNr) + throws NamespaceException { + + return getIntProperty(substituteNumberInMembersElement(nameOfFS, numOfPool, memberNr, + XMLConst.POOL_MEMBER_WEIGHT)); + } + + public String getBalancerStrategy(String fsName, int poolCounter) throws NamespaceException { + + String poolId = substituteNumberInPoolElement(fsName, poolCounter, XMLConst.BALANCE_STRATEGY); + if (isPresent(poolId)) { + return getStringProperty( + substituteNumberInPoolElement(fsName, poolCounter, XMLConst.BALANCE_STRATEGY)); + } else { + throw new NamespaceException("Unable to find the element '" + XMLConst.BALANCE_STRATEGY + + "' for the VFS:'" + fsName + "'"); + } + } } diff --git a/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java b/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java index a7760809c..8d0587efa 100644 --- a/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java +++ 
b/src/main/java/it/grid/storm/namespace/model/ApproachableRule.java @@ -4,246 +4,231 @@ */ package it.grid.storm.namespace.model; -import it.grid.storm.griduser.AbstractGridUser; -import it.grid.storm.griduser.DistinguishedName; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.NamespaceDirector; - import java.util.LinkedList; import java.util.List; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.griduser.AbstractGridUser; +import it.grid.storm.griduser.DistinguishedName; +import it.grid.storm.griduser.GridUserInterface; public class ApproachableRule implements Comparable { - private Logger log = NamespaceDirector.getLogger(); + private static Logger log = LoggerFactory.getLogger(ApproachableRule.class); + + private final String ruleName; + private final SubjectRules subjectRules; + + private String relativePath = null; + private LinkedList appFS = new LinkedList(); + + private final boolean anonymousHttpReadAccess; + + public ApproachableRule(String rulename, SubjectRules subjectRules, String relativePath, + boolean anonymousHttpReadAccess) { - private final String ruleName; - private final SubjectRules subjectRules; + this.ruleName = rulename; + this.subjectRules = subjectRules; + /** + * @todo : Check if relative Path is a path well formed. + */ + this.relativePath = relativePath; + this.anonymousHttpReadAccess = anonymousHttpReadAccess; + } - private String relativePath = null; - private LinkedList appFS = new LinkedList(); - - private final boolean anonymousHttpReadAccess; - - public ApproachableRule(String rulename, SubjectRules subjectRules, - String relativePath, boolean anonymousHttpReadAccess) { - - this.ruleName = rulename; - this.subjectRules = subjectRules; - /** - * @todo : Check if relative Path is a path well formed. 
- */ - this.relativePath = relativePath; - this.anonymousHttpReadAccess = anonymousHttpReadAccess; - } - - public ApproachableRule(String rulename, SubjectRules subjectRules, - String relativePath) { - - this.ruleName = rulename; - this.subjectRules = subjectRules; - /** - * @todo : Check if relative Path is a path well formed. - */ - this.relativePath = relativePath; - this.anonymousHttpReadAccess = false; - } - - public boolean isAdmitAll() { - - return subjectRules.getDNMatchingRule().isMatchAll() - && subjectRules.getVONameMatchingRule().isMatchAll(); - } - - public void addApproachableVFS(VirtualFS vfs) { - - this.appFS.add(vfs); - } - - public List getApproachableVFS() { - - return this.appFS; - } - - /** - * getSpaceRelativePath - * - * @return String - */ - public String getSpaceRelativePath() { - - return relativePath; - } - - /** - * - * @return String - */ - public String getRuleName() { - - return this.ruleName; - } - - public boolean getAnonymousHttpReadAccess() { - - return this.anonymousHttpReadAccess; - } - - /** - * - * @return Subject - */ - public SubjectRules getSubjectRules() { - - return this.subjectRules; - } - - /** - * MAIN METHOD - * - * @param gUser - * GridUserInterface - * @return boolean - */ - public boolean match(GridUserInterface gUser) { - - return matchDN(gUser.getDn()) && matchVoms(gUser); - } - - private boolean matchVoms(GridUserInterface gUser) { - - // ---- Check if VOMS Attributes are required ---- - if (subjectRules.getVONameMatchingRule().isMatchAll()) { - return true; - } - // VOMS Attribute required. 
- if (gUser instanceof AbstractGridUser - && ((AbstractGridUser) gUser).hasVoms()) { - log.debug("Grid User Requestor : " - + ((AbstractGridUser) gUser).toString()); - if (subjectRules.getVONameMatchingRule().match( - ((AbstractGridUser) gUser).getVO().getValue())) { - return true; - } - } - return false; - } - - private boolean matchDN(String dnString) { - - if (dnString == null) { - return subjectRules.getDNMatchingRule().isMatchAll(); - } - DistinguishedName dn = new DistinguishedName(dnString); - return subjectRules.getDNMatchingRule().match(dn); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - String sep = System.getProperty("line.separator"); - sb.append(sep + " --- APPROACHABLE RULE NAME ---" + sep); - sb.append(" Approachable Rule Name : " + this.ruleName + sep); - sb.append(" SUBJECT - dn : " - + this.getSubjectRules().getDNMatchingRule() + sep); - if (!this.getSubjectRules().getVONameMatchingRule().isMatchAll()) { - sb.append(" -- VOMS cert IS MANDATORY!" + sep); - sb.append(" -- SUBJECT - vo_name : " - + this.getSubjectRules().getVONameMatchingRule() + sep); - } else { - sb.append(" -- VOMS cert is not mandatory" + sep); - } - sb.append(" Relative-Path for Space : " + this.getSpaceRelativePath() - + sep); - sb.append(" Approachable VFS : " + this.appFS + sep); - return sb.toString(); - } - - public int compareTo(Object o) { - - int result = 1; - if (o instanceof ApproachableRule) { - ApproachableRule other = (ApproachableRule) o; - result = (this.getRuleName()).compareTo(other.getRuleName()); - } - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((appFS == null) ? 0 : appFS.hashCode()); - result = prime * result + ((log == null) ? 0 : log.hashCode()); - result = prime * result - + ((relativePath == null) ? 
0 : relativePath.hashCode()); - result = prime * result + ((ruleName == null) ? 0 : ruleName.hashCode()); - result = prime * result - + ((subjectRules == null) ? 0 : subjectRules.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ApproachableRule other = (ApproachableRule) obj; - if (appFS == null) { - if (other.appFS != null) { - return false; - } - } else if (!appFS.equals(other.appFS)) { - return false; - } - if (log == null) { - if (other.log != null) { - return false; - } - } else if (!log.equals(other.log)) { - return false; - } - if (relativePath == null) { - if (other.relativePath != null) { - return false; - } - } else if (!relativePath.equals(other.relativePath)) { - return false; - } - if (ruleName == null) { - if (other.ruleName != null) { - return false; - } - } else if (!ruleName.equals(other.ruleName)) { - return false; - } - if (subjectRules == null) { - if (other.subjectRules != null) { - return false; - } - } else if (!subjectRules.equals(other.subjectRules)) { - return false; - } - return true; - } + public ApproachableRule(String rulename, SubjectRules subjectRules, String relativePath) { + + this.ruleName = rulename; + this.subjectRules = subjectRules; + /** + * @todo : Check if relative Path is a path well formed. 
+ */ + this.relativePath = relativePath; + this.anonymousHttpReadAccess = false; + } + + public boolean isAdmitAll() { + + return subjectRules.getDNMatchingRule().isMatchAll() + && subjectRules.getVONameMatchingRule().isMatchAll(); + } + + public void addApproachableVFS(VirtualFS vfs) { + + this.appFS.add(vfs); + } + + public List getApproachableVFS() { + + return this.appFS; + } + + /** + * getSpaceRelativePath + * + * @return String + */ + public String getSpaceRelativePath() { + + return relativePath; + } + + /** + * + * @return String + */ + public String getRuleName() { + + return this.ruleName; + } + + public boolean getAnonymousHttpReadAccess() { + + return this.anonymousHttpReadAccess; + } + + /** + * + * @return Subject + */ + public SubjectRules getSubjectRules() { + + return this.subjectRules; + } + + /** + * MAIN METHOD + * + * @param gUser GridUserInterface + * @return boolean + */ + public boolean match(GridUserInterface gUser) { + + return matchDN(gUser.getDn()) && matchVoms(gUser); + } + + private boolean matchVoms(GridUserInterface gUser) { + + // ---- Check if VOMS Attributes are required ---- + if (subjectRules.getVONameMatchingRule().isMatchAll()) { + return true; + } + // VOMS Attribute required. 
+ if (gUser instanceof AbstractGridUser && ((AbstractGridUser) gUser).hasVoms()) { + log.debug("Grid User Requestor : " + ((AbstractGridUser) gUser).toString()); + if (subjectRules.getVONameMatchingRule() + .match(((AbstractGridUser) gUser).getVO().getValue())) { + return true; + } + } + return false; + } + + private boolean matchDN(String dnString) { + + if (dnString == null) { + return subjectRules.getDNMatchingRule().isMatchAll(); + } + DistinguishedName dn = new DistinguishedName(dnString); + return subjectRules.getDNMatchingRule().match(dn); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + String sep = System.getProperty("line.separator"); + sb.append(sep + " --- APPROACHABLE RULE NAME ---" + sep); + sb.append(" Approachable Rule Name : " + this.ruleName + sep); + sb.append(" SUBJECT - dn : " + this.getSubjectRules().getDNMatchingRule() + sep); + if (!this.getSubjectRules().getVONameMatchingRule().isMatchAll()) { + sb.append(" -- VOMS cert IS MANDATORY!" + sep); + sb.append(" -- SUBJECT - vo_name : " + this.getSubjectRules().getVONameMatchingRule() + + sep); + } else { + sb.append(" -- VOMS cert is not mandatory" + sep); + } + sb.append(" Relative-Path for Space : " + this.getSpaceRelativePath() + sep); + sb.append(" Approachable VFS : " + this.appFS + sep); + return sb.toString(); + } + + public int compareTo(Object o) { + + int result = 1; + if (o instanceof ApproachableRule) { + ApproachableRule other = (ApproachableRule) o; + result = (this.getRuleName()).compareTo(other.getRuleName()); + } + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((appFS == null) ? 0 : appFS.hashCode()); + result = prime * result + ((log == null) ? 0 : log.hashCode()); + result = prime * result + ((relativePath == null) ? 
0 : relativePath.hashCode()); + result = prime * result + ((ruleName == null) ? 0 : ruleName.hashCode()); + result = prime * result + ((subjectRules == null) ? 0 : subjectRules.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ApproachableRule other = (ApproachableRule) obj; + if (appFS == null) { + if (other.appFS != null) { + return false; + } + } else if (!appFS.equals(other.appFS)) { + return false; + } + if (relativePath == null) { + if (other.relativePath != null) { + return false; + } + } else if (!relativePath.equals(other.relativePath)) { + return false; + } + if (ruleName == null) { + if (other.ruleName != null) { + return false; + } + } else if (!ruleName.equals(other.ruleName)) { + return false; + } + if (subjectRules == null) { + if (other.subjectRules != null) { + return false; + } + } else if (!subjectRules.equals(other.subjectRules)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/namespace/model/DefaultValues.java b/src/main/java/it/grid/storm/namespace/model/DefaultValues.java index 9ac984521..d273184dc 100644 --- a/src/main/java/it/grid/storm/namespace/model/DefaultValues.java +++ b/src/main/java/it/grid/storm/namespace/model/DefaultValues.java @@ -4,10 +4,11 @@ */ package it.grid.storm.namespace.model; -import it.grid.storm.common.types.SizeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.TimeUnit; import it.grid.storm.namespace.DefaultValuesInterface; -import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TFileStorageType; @@ -15,317 +16,238 @@ import 
it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceType; -import org.slf4j.Logger; - -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class DefaultValues implements DefaultValuesInterface { - private Logger log = NamespaceDirector.getLogger(); - private SpaceDefault spaceDefault; - private FileDefault fileDefault; - - public DefaultValues(SpaceDefault spaceDefault, FileDefault fileDefault) { - - this.spaceDefault = spaceDefault; - this.fileDefault = fileDefault; - } - - public DefaultValues() { - - try { - this.spaceDefault = new SpaceDefault(); - } catch (NamespaceException ex) { - log.error("Something was wrong building default Space Default Values"); - } - try { - this.fileDefault = new FileDefault(); - } catch (NamespaceException ex1) { - log.error("Something was wrong building default File Default Values"); - } - } - - public void setSpaceDefaults(String type, long lifetime, long guarsize, - long totalsize) throws NamespaceException { - - this.spaceDefault = new SpaceDefault(type, lifetime, guarsize, totalsize); - } - - public void setFileDefaults(String type, long lifetime) - throws NamespaceException { - - this.fileDefault = new FileDefault(type, lifetime); - } - - public TLifeTimeInSeconds getDefaultSpaceLifetime() { - - return spaceDefault.lifetime; - } - - public TSpaceType getDefaultSpaceType() { - - return spaceDefault.type; - } - - public TSizeInBytes getDefaultGuaranteedSpaceSize() { - - return spaceDefault.guarsize; - } - - public TSizeInBytes getDefaultTotalSpaceSize() { - - return spaceDefault.totalsize; - } - - public TLifeTimeInSeconds getDefaultFileLifeTime() { - - return fileDefault.lifetime; - } - - public TFileStorageType getDefaultFileType() { - - return fileDefault.type; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - String sep = System.getProperty("line.separator"); - sb.append(" DEF. Space Lifetime : " - + this.getDefaultSpaceLifetime() + sep); - sb.append(" DEF. Space Guar. size : " - + this.getDefaultGuaranteedSpaceSize() + sep); - sb.append(" DEF. Space Tot. 
size : " - + this.getDefaultTotalSpaceSize() + sep); - sb.append(" DEF. Space Type : " + this.getDefaultSpaceType() - + sep); - sb.append(" DEF. File Lifetime : " + this.getDefaultFileLifeTime() - + sep); - sb.append(" DEF. File Type : " + this.getDefaultFileType() - + sep); - return sb.toString(); - } - - /************************************************************************** - * INNER CLASS - **************************************************************************/ - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - public class SpaceDefault { - - private TSpaceType type = null; - private TLifeTimeInSeconds lifetime; - private TSizeInBytes guarsize; - private TSizeInBytes totalsize; - - public SpaceDefault() throws NamespaceException { - - // Build space type - this.type = TSpaceType.getTSpaceType(DefaultValues.DEFAULT_SPACE_TYPE); - // Build lifetime - try { - this.lifetime = TLifeTimeInSeconds.make(DefaultValues.DEFAULT_SPACE_LT, - TimeUnit.SECONDS); - } catch (IllegalArgumentException ex) { - log.error(" Default Space Lifetime was wrong "); - throw new NamespaceException( - "Space Lifetime invalid argument in Namespace configuration.", ex); - } - // Build of Guaranteed Space Size - try { - this.guarsize = TSizeInBytes.make( - DefaultValues.DEFAULT_SPACE_GUAR_SIZE, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException ex1) { - log.error(" Default Guaranteed Space Size was wrong "); - throw new NamespaceException( - " Guaranteed Space Size invalid argument in Namespace configuration.", - ex1); - } - - // Build of Total Space Size - try { - this.totalsize = TSizeInBytes.make( - DefaultValues.DEFAULT_SPACE_TOT_SIZE, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException ex2) { - log.error(" Default Total Space Size was wrong "); - throw new NamespaceException( - "Total Space Size invalid argument in Namespace configuration.", ex2); - } - } - - public SpaceDefault(String type, long lifetime, long guarsize, - long totalsize) throws NamespaceException { - - // Build space type - this.type = TSpaceType.getTSpaceType(type); - - // Build lifetime - try { - this.lifetime = TLifeTimeInSeconds.make(lifetime, TimeUnit.SECONDS); - } catch (IllegalArgumentException ex) { - log.error(" Default Space Lifetime was wrong "); - throw new NamespaceException( - "Space Lifetime invalid argument in Namespace configuration.", ex); - } - - // Checking of size - if (guarsize > totalsize) { - log - .error(" Default Space Guaranteed Size 
is greater of Space Total Size !"); - throw new NamespaceException( - "Space size (Guar and Total) are invalid in Namespace configuration."); - } - - // Build of Guaranteed Space Size - try { - this.guarsize = TSizeInBytes.make(guarsize, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException ex1) { - log.error(" Default Guaranteed Space Size was wrong "); - throw new NamespaceException( - " Guaranteed Space Size invalid argument in Namespace configuration.", - ex1); - } - - // Build of Total Space Size - try { - this.totalsize = TSizeInBytes.make(totalsize, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException ex2) { - log.error(" Default Total Space Size was wrong "); - throw new NamespaceException( - "Total Space Size invalid argument in Namespace configuration.", ex2); - } - } - - public TSpaceType getSpaceType() { - - return type; - } - - public TLifeTimeInSeconds getLifetime() { - - return lifetime; - } - - public TSizeInBytes guarsize() { - - return guarsize; - } - - public TSizeInBytes totalsize() { - - return totalsize; - } - - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ - public class FileDefault { - - private TFileStorageType type = null; - private TLifeTimeInSeconds lifetime; - - public FileDefault() throws NamespaceException { - - // Build space type - this.type = TFileStorageType - .getTFileStorageType(DefaultValues.DEFAULT_FILE_TYPE); - - // Build lifetime - try { - this.lifetime = TLifeTimeInSeconds.make(DefaultValues.DEFAULT_FILE_LT, - TimeUnit.SECONDS); - } catch (IllegalArgumentException ex) { - log.error(" Default Space Lifetime was wrong "); - throw new NamespaceException( - "Space Lifetime invalid argument in Namespace configuration.", ex); - } - - } - - public FileDefault(String type, long lifetime) throws NamespaceException { - - // Build space type - this.type = TFileStorageType.getTFileStorageType(type); - - // Build lifetime - try { - this.lifetime = TLifeTimeInSeconds.make(lifetime, TimeUnit.SECONDS); - } catch (IllegalArgumentException ex) { - log.error(" Default Space Lifetime was wrong "); - throw new NamespaceException( - "Space Lifetime invalid argument in Namespace configuration.", ex); - } - } - - public TFileStorageType getFileStorageType() { - - return type; - } - - public TLifeTimeInSeconds getLifetime() { - - return lifetime; - } - - } + private static Logger log = LoggerFactory.getLogger(DefaultValues.class); + private SpaceDefault spaceDefault; + private FileDefault fileDefault; + + public DefaultValues(SpaceDefault spaceDefault, FileDefault fileDefault) { + + this.spaceDefault = spaceDefault; + this.fileDefault = fileDefault; + } + + public DefaultValues() { + + try { + this.spaceDefault = new SpaceDefault(); + } catch (NamespaceException ex) { + log.error("Something was wrong building default Space Default Values"); + } + try { + this.fileDefault = new FileDefault(); + } catch (NamespaceException ex1) { + log.error("Something was wrong building default File Default Values"); + } + } + + public void setSpaceDefaults(String type, long lifetime, 
long guarsize, long totalsize) + throws NamespaceException { + + this.spaceDefault = new SpaceDefault(type, lifetime, guarsize, totalsize); + } + + public void setFileDefaults(String type, long lifetime) throws NamespaceException { + + this.fileDefault = new FileDefault(type, lifetime); + } + + public TLifeTimeInSeconds getDefaultSpaceLifetime() { + + return spaceDefault.lifetime; + } + + public TSpaceType getDefaultSpaceType() { + + return spaceDefault.type; + } + + public TSizeInBytes getDefaultGuaranteedSpaceSize() { + + return spaceDefault.guarsize; + } + + public TSizeInBytes getDefaultTotalSpaceSize() { + + return spaceDefault.totalsize; + } + + public TLifeTimeInSeconds getDefaultFileLifeTime() { + + return fileDefault.lifetime; + } + + public TFileStorageType getDefaultFileType() { + + return fileDefault.type; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + String sep = System.getProperty("line.separator"); + sb.append(" DEF. Space Lifetime : " + this.getDefaultSpaceLifetime() + sep); + sb.append(" DEF. Space Guar. size : " + this.getDefaultGuaranteedSpaceSize() + sep); + sb.append(" DEF. Space Tot. size : " + this.getDefaultTotalSpaceSize() + sep); + sb.append(" DEF. Space Type : " + this.getDefaultSpaceType() + sep); + sb.append(" DEF. File Lifetime : " + this.getDefaultFileLifeTime() + sep); + sb.append(" DEF. 
File Type : " + this.getDefaultFileType() + sep); + return sb.toString(); + } + + /************************************************************************** + * INNER CLASS + **************************************************************************/ + + public class SpaceDefault { + + private TSpaceType type = null; + private TLifeTimeInSeconds lifetime; + private TSizeInBytes guarsize; + private TSizeInBytes totalsize; + + public SpaceDefault() throws NamespaceException { + + // Build space type + this.type = TSpaceType.getTSpaceType(DefaultValues.DEFAULT_SPACE_TYPE); + // Build lifetime + try { + this.lifetime = TLifeTimeInSeconds.make(DefaultValues.DEFAULT_SPACE_LT, TimeUnit.SECONDS); + } catch (IllegalArgumentException ex) { + log.error(" Default Space Lifetime was wrong "); + throw new NamespaceException("Space Lifetime invalid argument in Namespace configuration.", + ex); + } + // Build of Guaranteed Space Size + try { + this.guarsize = TSizeInBytes.make(DefaultValues.DEFAULT_SPACE_GUAR_SIZE); + } catch (InvalidTSizeAttributesException ex1) { + log.error(" Default Guaranteed Space Size was wrong "); + throw new NamespaceException( + " Guaranteed Space Size invalid argument in Namespace configuration.", ex1); + } + + // Build of Total Space Size + try { + this.totalsize = TSizeInBytes.make(DefaultValues.DEFAULT_SPACE_TOT_SIZE); + } catch (InvalidTSizeAttributesException ex2) { + log.error(" Default Total Space Size was wrong "); + throw new NamespaceException( + "Total Space Size invalid argument in Namespace configuration.", ex2); + } + } + + public SpaceDefault(String type, long lifetime, long guarsize, long totalsize) + throws NamespaceException { + + // Build space type + this.type = TSpaceType.getTSpaceType(type); + + // Build lifetime + try { + this.lifetime = TLifeTimeInSeconds.make(lifetime, TimeUnit.SECONDS); + } catch (IllegalArgumentException ex) { + log.error(" Default Space Lifetime was wrong "); + throw new NamespaceException("Space Lifetime 
invalid argument in Namespace configuration.", + ex); + } + + // Checking of size + if (guarsize > totalsize) { + log.error(" Default Space Guaranteed Size is greater of Space Total Size !"); + throw new NamespaceException( + "Space size (Guar and Total) are invalid in Namespace configuration."); + } + + // Build of Guaranteed Space Size + try { + this.guarsize = TSizeInBytes.make(guarsize); + } catch (InvalidTSizeAttributesException ex1) { + log.error(" Default Guaranteed Space Size was wrong "); + throw new NamespaceException( + " Guaranteed Space Size invalid argument in Namespace configuration.", ex1); + } + + // Build of Total Space Size + try { + this.totalsize = TSizeInBytes.make(totalsize); + } catch (InvalidTSizeAttributesException ex2) { + log.error(" Default Total Space Size was wrong "); + throw new NamespaceException( + "Total Space Size invalid argument in Namespace configuration.", ex2); + } + } + + public TSpaceType getSpaceType() { + + return type; + } + + public TLifeTimeInSeconds getLifetime() { + + return lifetime; + } + + public TSizeInBytes guarsize() { + + return guarsize; + } + + public TSizeInBytes totalsize() { + + return totalsize; + } + + } + + public class FileDefault { + + private TFileStorageType type = null; + private TLifeTimeInSeconds lifetime; + + public FileDefault() throws NamespaceException { + + // Build space type + this.type = TFileStorageType.getTFileStorageType(DefaultValues.DEFAULT_FILE_TYPE); + + // Build lifetime + try { + this.lifetime = TLifeTimeInSeconds.make(DefaultValues.DEFAULT_FILE_LT, TimeUnit.SECONDS); + } catch (IllegalArgumentException ex) { + log.error(" Default Space Lifetime was wrong "); + throw new NamespaceException("Space Lifetime invalid argument in Namespace configuration.", + ex); + } + + } + + public FileDefault(String type, long lifetime) throws NamespaceException { + + // Build space type + this.type = TFileStorageType.getTFileStorageType(type); + + // Build lifetime + try { + this.lifetime = 
TLifeTimeInSeconds.make(lifetime, TimeUnit.SECONDS); + } catch (IllegalArgumentException ex) { + log.error(" Default Space Lifetime was wrong "); + throw new NamespaceException("Space Lifetime invalid argument in Namespace configuration.", + ex); + } + } + + public TFileStorageType getFileStorageType() { + + return type; + } + + public TLifeTimeInSeconds getLifetime() { + + return lifetime; + } + + } } diff --git a/src/main/java/it/grid/storm/namespace/model/Property.java b/src/main/java/it/grid/storm/namespace/model/Property.java index bf71db1a4..efddba785 100644 --- a/src/main/java/it/grid/storm/namespace/model/Property.java +++ b/src/main/java/it/grid/storm/namespace/model/Property.java @@ -4,224 +4,189 @@ */ package it.grid.storm.namespace.model; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.namespace.NamespaceDirector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.PropertyInterface; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TSizeInBytes; -import org.slf4j.Logger; - public class Property implements PropertyInterface { - private Logger log = NamespaceDirector.getLogger(); - private TSizeInBytes totalOnlineSize = TSizeInBytes.makeEmpty(); - private TSizeInBytes totalNearlineSize = TSizeInBytes.makeEmpty(); - private RetentionPolicy retentionPolicy = RetentionPolicy.UNKNOWN; - private ExpirationMode expirationMode = ExpirationMode.UNKNOWN; - private AccessLatency accessLatency = AccessLatency.UNKNOWN; - private boolean hasLimitedSize = false; + private static Logger log = LoggerFactory.getLogger(Property.class); + + private TSizeInBytes totalOnlineSize = TSizeInBytes.makeEmpty(); + private TSizeInBytes totalNearlineSize = TSizeInBytes.makeEmpty(); + private RetentionPolicy retentionPolicy = RetentionPolicy.UNKNOWN; + private ExpirationMode expirationMode = ExpirationMode.UNKNOWN; + private 
AccessLatency accessLatency = AccessLatency.UNKNOWN; + private boolean hasLimitedSize = false; + + public static Property from(PropertyInterface other) { + + Property property = new Property(); + property.accessLatency = other.getAccessLatency(); + property.expirationMode = other.getExpirationMode(); + property.hasLimitedSize = other.hasLimitedSize(); + property.retentionPolicy = other.getRetentionPolicy(); + property.totalNearlineSize = other.getTotalNearlineSize(); + property.totalOnlineSize = other.getTotalOnlineSize(); + return property; + } + + public TSizeInBytes getTotalOnlineSize() { + + return totalOnlineSize; + } + + public TSizeInBytes getTotalNearlineSize() { + + return totalNearlineSize; + } + + public RetentionPolicy getRetentionPolicy() { + + return retentionPolicy; + } + + public ExpirationMode getExpirationMode() { + + return expirationMode; + } + + public AccessLatency getAccessLatency() { + + return accessLatency; + } + + @Override + public boolean hasLimitedSize() { + + return hasLimitedSize; + } + + public void setTotalOnlineSize(String unitType, long onlineSize) throws NamespaceException { + + try { + this.totalOnlineSize = SizeUnitType.getInBytes(unitType, onlineSize); + } catch (InvalidTSizeAttributesException ex1) { + log.error("TotalOnlineSize parameter is wrong "); + throw new NamespaceException("'TotalOnlineSize' invalid argument in Namespace configuration.", + ex1); + } + } + + public void setTotalNearlineSize(String unitType, long nearlineSize) throws NamespaceException { + + try { + this.totalNearlineSize = SizeUnitType.getInBytes(unitType, nearlineSize); + } catch (InvalidTSizeAttributesException ex1) { + log.error("TotalOnlineSize parameter is wrong "); + throw new NamespaceException("'TotalOnlineSize' invalid argument in Namespace configuration.", + ex1); + } + } + + public void setRetentionPolicy(String retentionPolicy) throws NamespaceException { + + this.retentionPolicy = RetentionPolicy.getRetentionPolicy(retentionPolicy); + } 
+ + public void setAccessLatency(String accessLatency) throws NamespaceException { + + this.accessLatency = AccessLatency.getAccessLatency(accessLatency); + } + + public void setExpirationMode(String expirationMode) throws NamespaceException { + + this.expirationMode = ExpirationMode.getExpirationMode(expirationMode); + } + + public void setLimitedSize(boolean limitedSize) throws NamespaceException { + + this.hasLimitedSize = limitedSize; + } + + /****************************************** + * VERSION 1.4 * + *******************************************/ + + public boolean isOnlineSpaceLimited() { + + return hasLimitedSize; + } + + public static class SizeUnitType { + + private static Logger log = LoggerFactory.getLogger(SizeUnitType.class); + + private String sizeTypeName; + private int ordinal; + private long size; - public static Property from(PropertyInterface other) { + public final static SizeUnitType BYTE = new SizeUnitType("Byte", 0, 1); + public final static SizeUnitType KB = new SizeUnitType("KB", 1, 1000); + public final static SizeUnitType MB = new SizeUnitType("MB", 2, 1000000); + public final static SizeUnitType GB = new SizeUnitType("GB", 3, 1000000000); + public final static SizeUnitType TB = new SizeUnitType("TB", 4, 1000000000000L); + public final static SizeUnitType UNKNOWN = new SizeUnitType("UNKNOWN", -1, -1); - Property property = new Property(); - property.accessLatency = other.getAccessLatency(); - property.expirationMode = other.getExpirationMode(); - property.hasLimitedSize = other.hasLimitedSize(); - property.retentionPolicy = other.getRetentionPolicy(); - property.totalNearlineSize = other.getTotalNearlineSize(); - property.totalOnlineSize = other.getTotalOnlineSize(); - return property; - } + private SizeUnitType(String sizeTypeName, int ordinal, long size) { - public TSizeInBytes getTotalOnlineSize() { + this.sizeTypeName = sizeTypeName; + this.size = size; + this.ordinal = ordinal; + } - return totalOnlineSize; - } + public String 
getTypeName() { - public TSizeInBytes getTotalNearlineSize() { - - return totalNearlineSize; - } + return this.sizeTypeName; + } - public RetentionPolicy getRetentionPolicy() { + private static SizeUnitType makeUnitType(String unitType) { - return retentionPolicy; - } - - public ExpirationMode getExpirationMode() { + SizeUnitType result = SizeUnitType.UNKNOWN; + if (unitType.equals(SizeUnitType.BYTE.sizeTypeName)) { + result = SizeUnitType.BYTE; + } + if (unitType.equals(SizeUnitType.KB.sizeTypeName)) { + result = SizeUnitType.KB; + } + if (unitType.equals(SizeUnitType.MB.sizeTypeName)) { + result = SizeUnitType.MB; + } + if (unitType.equals(SizeUnitType.GB.sizeTypeName)) { + result = SizeUnitType.GB; + } + if (unitType.equals(SizeUnitType.TB.sizeTypeName)) { + result = SizeUnitType.TB; + } + return result; + } + + public static TSizeInBytes getInBytes(String unitType, long value) + throws InvalidTSizeAttributesException { + + TSizeInBytes result = TSizeInBytes.makeEmpty(); + SizeUnitType sizeUnitType = makeUnitType(unitType); + if (!(sizeUnitType.getTypeName().equals(SizeUnitType.UNKNOWN.getTypeName()))) { + result = TSizeInBytes.make(value * sizeUnitType.size); + } + return result; + } - return expirationMode; - } - - public AccessLatency getAccessLatency() { + public TSizeInBytes getInBytes() { - return accessLatency; - } - - @Override - public boolean hasLimitedSize() { - - return hasLimitedSize; - } - - public void setTotalOnlineSize(String unitType, long onlineSize) - throws NamespaceException { - - try { - this.totalOnlineSize = SizeUnitType.getInBytes(unitType, onlineSize); - } catch (InvalidTSizeAttributesException ex1) { - log.error("TotalOnlineSize parameter is wrong "); - throw new NamespaceException( - "'TotalOnlineSize' invalid argument in Namespace configuration.", ex1); - } - } - - public void setTotalNearlineSize(String unitType, long nearlineSize) - throws NamespaceException { + TSizeInBytes result = TSizeInBytes.makeEmpty(); + try { + result = 
TSizeInBytes.make(this.size); + } catch (InvalidTSizeAttributesException ex) { + log.error("Size '" + this.size + "'are invalid. Use empty size: '" + result + "'." + ex); + } + return result; + } - try { - this.totalNearlineSize = SizeUnitType.getInBytes(unitType, nearlineSize); - } catch (InvalidTSizeAttributesException ex1) { - log.error("TotalOnlineSize parameter is wrong "); - throw new NamespaceException( - "'TotalOnlineSize' invalid argument in Namespace configuration.", ex1); - } - } - - public void setRetentionPolicy(String retentionPolicy) - throws NamespaceException { - - this.retentionPolicy = RetentionPolicy.getRetentionPolicy(retentionPolicy); - } - - public void setAccessLatency(String accessLatency) throws NamespaceException { - - this.accessLatency = AccessLatency.getAccessLatency(accessLatency); - } - - public void setExpirationMode(String expirationMode) - throws NamespaceException { - - this.expirationMode = ExpirationMode.getExpirationMode(expirationMode); - } - - public void setLimitedSize(boolean limitedSize) throws NamespaceException { - - this.hasLimitedSize = limitedSize; - } - - /****************************************** - * VERSION 1.4 * - *******************************************/ - - public boolean isOnlineSpaceLimited() { - - return hasLimitedSize; - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2007 - *

- * - *

- * Company: - *

- * - * @author not attributable - * @version 1.0 - */ - public static class SizeUnitType { - - private Logger log = NamespaceDirector.getLogger(); - - /** - * - **/ - - private String sizeTypeName; - private int ordinal; - private long size; - - public final static SizeUnitType BYTE = new SizeUnitType("Byte", 0, 1); - public final static SizeUnitType KB = new SizeUnitType("KB", 1, 1000); - public final static SizeUnitType MB = new SizeUnitType("MB", 2, 1000000); - public final static SizeUnitType GB = new SizeUnitType("GB", 3, 1000000000); - public final static SizeUnitType TB = new SizeUnitType("TB", 4, - 1000000000000L); - public final static SizeUnitType UNKNOWN = new SizeUnitType("UNKNOWN", -1, - -1); - - private SizeUnitType(String sizeTypeName, int ordinal, long size) { - - this.sizeTypeName = sizeTypeName; - this.size = size; - this.ordinal = ordinal; - } - - public String getTypeName() { - - return this.sizeTypeName; - } - - private static SizeUnitType makeUnitType(String unitType) { - - SizeUnitType result = SizeUnitType.UNKNOWN; - if (unitType.equals(SizeUnitType.BYTE.sizeTypeName)) { - result = SizeUnitType.BYTE; - } - if (unitType.equals(SizeUnitType.KB.sizeTypeName)) { - result = SizeUnitType.KB; - } - if (unitType.equals(SizeUnitType.MB.sizeTypeName)) { - result = SizeUnitType.MB; - } - if (unitType.equals(SizeUnitType.GB.sizeTypeName)) { - result = SizeUnitType.GB; - } - if (unitType.equals(SizeUnitType.TB.sizeTypeName)) { - result = SizeUnitType.TB; - } - return result; - } - - public static TSizeInBytes getInBytes(String unitType, long value) - throws InvalidTSizeAttributesException { - - TSizeInBytes result = TSizeInBytes.makeEmpty(); - SizeUnitType sizeUnitType = makeUnitType(unitType); - if (!(sizeUnitType.getTypeName().equals(SizeUnitType.UNKNOWN - .getTypeName()))) { - result = TSizeInBytes.make(value * sizeUnitType.size, SizeUnit.BYTES); - } - return result; - } - - public TSizeInBytes getInBytes() { - - TSizeInBytes result = 
TSizeInBytes.makeEmpty(); - try { - result = TSizeInBytes.make(this.size, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException ex) { - log.error("Size '" + this.size + "'are invalid. Use empty size: '" - + result + "'." + ex); - } - return result; - } - - } + } } diff --git a/src/main/java/it/grid/storm/namespace/model/Quota.java b/src/main/java/it/grid/storm/namespace/model/Quota.java index 6bb22d29e..dc46abab3 100644 --- a/src/main/java/it/grid/storm/namespace/model/Quota.java +++ b/src/main/java/it/grid/storm/namespace/model/Quota.java @@ -4,93 +4,87 @@ */ package it.grid.storm.namespace.model; -import it.grid.storm.namespace.NamespaceDirector; - -import org.slf4j.Logger; - public class Quota { - private final Logger log = NamespaceDirector.getLogger(); - - private boolean defined = false; - private boolean enabled = false; - private String device = null; - private QuotaType quotaType = null; + private boolean defined = false; + private boolean enabled = false; + private String device = null; + private QuotaType quotaType = null; - public Quota() { + public Quota() { - super(); - } + super(); + } - public Quota(boolean enabled, String device, QuotaType quotaType) { + public Quota(boolean enabled, String device, QuotaType quotaType) { - defined = true; - this.enabled = enabled; - this.device = device; - this.quotaType = quotaType; - } + defined = true; + this.enabled = enabled; + this.device = device; + this.quotaType = quotaType; + } - /** - * Read only attribute - * - * @return boolean - */ - public boolean getDefined() { + /** + * Read only attribute + * + * @return boolean + */ + public boolean getDefined() { - return defined; - } + return defined; + } - public boolean getEnabled() { + public boolean getEnabled() { - return enabled; - } + return enabled; + } - public void setEnabled(boolean enabled) { + public void setEnabled(boolean enabled) { - this.enabled = enabled; - } + this.enabled = enabled; + } - public String getDevice() { + public String 
getDevice() { - return device; - } + return device; + } - public void setDevice(String device) { + public void setDevice(String device) { - this.device = device; - } + this.device = device; + } - public QuotaType getQuotaType() { + public QuotaType getQuotaType() { - return quotaType; - } + return quotaType; + } - public void setQuotaType(QuotaType quotaType) { + public void setQuotaType(QuotaType quotaType) { - this.quotaType = quotaType; - } + this.quotaType = quotaType; + } - /** - * Return the value of UserName or GroupName or FileSetName. The meaning of - * the value depends on QuotaType. - * - * @return the quotaElementName - */ - public String getQuotaElementName() { + /** + * Return the value of UserName or GroupName or FileSetName. The meaning of the value depends on + * QuotaType. + * + * @return the quotaElementName + */ + public String getQuotaElementName() { - return quotaType.getValue(); - } + return quotaType.getValue(); + } - @Override - public String toString() { + @Override + public String toString() { - StringBuilder result = new StringBuilder(); - result.append("Quota : [ Defined:'" + defined + "' "); - result.append("Enabled:'" + enabled + "' "); - result.append("device:'" + device + "', "); - result.append("quotaType:'" + quotaType + " "); - result.append("]"); - return result.toString(); - } + StringBuilder result = new StringBuilder(); + result.append("Quota : [ Defined:'" + defined + "' "); + result.append("Enabled:'" + enabled + "' "); + result.append("device:'" + device + "', "); + result.append("quotaType:'" + quotaType + " "); + result.append("]"); + return result.toString(); + } } diff --git a/src/main/java/it/grid/storm/namespace/model/VirtualFS.java b/src/main/java/it/grid/storm/namespace/model/VirtualFS.java index b4f8c68df..b5745a86c 100644 --- a/src/main/java/it/grid/storm/namespace/model/VirtualFS.java +++ b/src/main/java/it/grid/storm/namespace/model/VirtualFS.java @@ -19,12 +19,10 @@ import com.google.common.collect.Lists; 
import it.grid.storm.balancer.BalancingStrategy; -import it.grid.storm.balancer.Node; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.GUID; import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.filesystem.Filesystem; import it.grid.storm.filesystem.FilesystemIF; import it.grid.storm.filesystem.GPFSSpaceSystem; @@ -40,9 +38,8 @@ import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.DefaultValuesInterface; import it.grid.storm.namespace.ExpiredSpaceTokenException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.PropertyInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.StoRIImpl; @@ -79,7 +76,7 @@ public class VirtualFS { FilesystemIF fsWrapper = null; List mappingRules = Lists.newArrayList(); List approachableRules = Lists.newArrayList(); - Configuration config; + StormConfiguration config; StorageClassType storageClass = null; TSpaceToken spaceToken; SAAuthzType saAuthzType = SAAuthzType.UNKNOWN; @@ -110,9 +107,8 @@ public void setFSDriver(Class fsDriver) throws NamespaceException { this.genericFS = makeFSInstance(); fsWrapper = RandomWaitFilesystemAdapter.maybeWrapFilesystem(fsWrapper); - this.fsWrapper = new MetricsFilesystemAdapter( - new Filesystem(getFSDriverInstance()), - METRIC_REGISTRY.getRegistry()); + this.fsWrapper = new MetricsFilesystemAdapter(new Filesystem(getFSDriverInstance()), + METRIC_REGISTRY.getRegistry()); } public void setSpaceTokenDescription(String spaceTokenDescription) { @@ -135,8 +131,7 @@ public void setProperties(PropertyInterface prop) { this.properties = prop; } - public void setSpaceSystemDriver(Class spaceDriver) - throws 
NamespaceException { + public void setSpaceSystemDriver(Class spaceDriver) throws NamespaceException { if (spaceDriver == null) { throw new NamespaceException("NULL space driver"); @@ -193,8 +188,7 @@ private String buildRootPath(String rootPath) throws NamespaceException { rootPathUri = new URI(rootPath); } catch (URISyntaxException e) { throw new NamespaceException( - "Unable to set rootPath. Invalid string. URISyntaxException: " - + e.getMessage()); + "Unable to set rootPath. Invalid string. URISyntaxException: " + e.getMessage()); } return rootPathUri.normalize().toString(); } @@ -236,11 +230,11 @@ public TSizeInBytes getUsedOnlineSpace() throws NamespaceException { TSizeInBytes result = TSizeInBytes.makeEmpty(); /** - * @todo : This method must contact Space Manager (or who for him) to - * retrieve the real situation + * @todo : This method must contact Space Manager (or who for him) to retrieve the real + * situation * - * @todo : Contact Space Catalog to retrieve the logical space occupied. - * This space must to be equal to space occupied in underlying FS. + * @todo : Contact Space Catalog to retrieve the logical space occupied. This space must to be + * equal to space occupied in underlying FS. */ return result; } @@ -249,11 +243,11 @@ public TSizeInBytes getAvailableOnlineSpace() throws NamespaceException { TSizeInBytes result = TSizeInBytes.makeEmpty(); /** - * @todo : This method must contact Space Manager (or who for him) to - * retrieve the real situation + * @todo : This method must contact Space Manager (or who for him) to retrieve the real + * situation * - * @todo : Contact Space Catalog to retrieve the logical space occupied. - * This space must to be equal to space occupied in underlying FS. + * @todo : Contact Space Catalog to retrieve the logical space occupied. This space must to be + * equal to space occupied in underlying FS. 
*/ return result; @@ -302,17 +296,16 @@ public List getMappingRules() throws NamespaceException { if (this.mappingRules.isEmpty()) { throw new NamespaceException( - "No one MAPPING RULES bound with this VFS (" + aliasName + "). "); + "No one MAPPING RULES bound with this VFS (" + aliasName + "). "); } return this.mappingRules; } - public List getApproachableRules() - throws NamespaceException { + public List getApproachableRules() throws NamespaceException { if (this.approachableRules.isEmpty()) { throw new NamespaceException( - "No one APPROACHABLE RULES bound with this VFS (" + aliasName + "). "); + "No one APPROACHABLE RULES bound with this VFS (" + aliasName + "). "); } return this.approachableRules; } @@ -326,27 +319,23 @@ private genericfs makeFSInstance() throws NamespaceException { genericfs fs = null; if (fsDriver == null) { - throw new NamespaceException( - "Cannot build FS Driver istance without a valid Driver Class!"); + throw new NamespaceException("Cannot build FS Driver istance without a valid Driver Class!"); } Class fsArgumentsClass[] = new Class[1]; fsArgumentsClass[0] = String.class; - Object[] fsArguments = new Object[] { this.rootPath }; + Object[] fsArguments = new Object[] {this.rootPath}; Constructor fsConstructor = null; try { fsConstructor = fsDriver.getConstructor(fsArgumentsClass); } catch (SecurityException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + "Unable to retrieve the FS Driver Constructor. Security problem.", ex); } catch (NoSuchMethodException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. 
No such constructor.", - ex); + "Unable to retrieve the FS Driver Constructor. No such constructor.", ex); } try { fs = (genericfs) fsConstructor.newInstance(fsArguments); @@ -357,21 +346,17 @@ private genericfs makeFSInstance() throws NamespaceException { log.debug("VFS Ex Stack: "); ex1.printStackTrace(); - throw new NamespaceException("Unable to instantiate the FS Driver. ", - ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. ", ex1); } catch (IllegalArgumentException ex1) { - log.error("Unable to instantiate the FS Driver. Using wrong argument.", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Using wrong argument.", ex1); + log.error("Unable to instantiate the FS Driver. Using wrong argument.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Using wrong argument.", + ex1); } catch (IllegalAccessException ex1) { log.error("Unable to instantiate the FS Driver. Illegal Access.", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Illegal Access.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Illegal Access.", ex1); } catch (InstantiationException ex1) { log.error("Unable to instantiate the FS Driver. Generic problem..", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Generic problem..", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. 
Generic problem..", ex1); } return fs; @@ -384,9 +369,8 @@ public FilesystemIF getFilesystem() throws NamespaceException { FilesystemIF fs = new Filesystem(getFSDriverInstance()); fs = RandomWaitFilesystemAdapter.maybeWrapFilesystem(fs); - - fsWrapper = new MetricsFilesystemAdapter(fs, - METRIC_REGISTRY.getRegistry()); + + fsWrapper = new MetricsFilesystemAdapter(fs, METRIC_REGISTRY.getRegistry()); } return this.fsWrapper; @@ -416,71 +400,53 @@ private SpaceSystem makeSpaceSystemInstance() throws NamespaceException { if (spaceSystemDriver == null) { throw new NamespaceException( - "Cannot build Space Driver istance without a valid Driver Class!"); + "Cannot build Space Driver istance without a valid Driver Class!"); } // Check if SpaceSystem is GPFSSpaceSystem used for GPFS FS // Check if SpaceSystem is MockSpaceSystem used for Posix FS - if ((this.spaceSystemDriver.getName() - .equals(GPFSSpaceSystem.class.getName())) - || (this.spaceSystemDriver.getName() - .equals(MockSpaceSystem.class.getName()))) { + if ((this.spaceSystemDriver.getName().equals(GPFSSpaceSystem.class.getName())) + || (this.spaceSystemDriver.getName().equals(MockSpaceSystem.class.getName()))) { // The class type argument is the mount point of GPFS file system Class ssArgumentsClass[] = new Class[1]; ssArgumentsClass[0] = String.class; - Object[] ssArguments = new Object[] { this.rootPath }; + Object[] ssArguments = new Object[] {this.rootPath}; Constructor ssConstructor = null; try { ssConstructor = spaceSystemDriver.getConstructor(ssArgumentsClass); } catch (SecurityException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + "Unable to retrieve the FS Driver Constructor. 
Security problem.", ex); } catch (NoSuchMethodException ex) { - log.error( - "Unable to retrieve the FS Driver Constructor. Security problem.", - ex); + log.error("Unable to retrieve the FS Driver Constructor. Security problem.", ex); throw new NamespaceException( - "Unable to retrieve the FS Driver Constructor. No such constructor.", - ex); + "Unable to retrieve the FS Driver Constructor. No such constructor.", ex); } try { ss = (SpaceSystem) ssConstructor.newInstance(ssArguments); } catch (InvocationTargetException ex1) { - log.error("Unable to instantiate the SpaceSystem Driver. Wrong target.", - ex1); - throw new NamespaceException("Unable to instantiate the FS Driver. ", - ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Wrong target.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. ", ex1); } catch (IllegalArgumentException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Using wrong argument.", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Using wrong argument.", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Using wrong argument.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Using wrong argument.", + ex1); } catch (IllegalAccessException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Illegal Access.", ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Illegal Access.", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. Illegal Access.", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Illegal Access.", ex1); } catch (InstantiationException ex1) { - log.error( - "Unable to instantiate the SpaceSystem Driver. Generic problem..", - ex1); - throw new NamespaceException( - "Unable to instantiate the FS Driver. Generic problem..", ex1); + log.error("Unable to instantiate the SpaceSystem Driver. 
Generic problem..", ex1); + throw new NamespaceException("Unable to instantiate the FS Driver. Generic problem..", ex1); } } else { log.error("None Space System Driver built"); /** - * @todo : Perhaps a "genericSpaceSystem" could be more disederable rather - * than NULL + * @todo : Perhaps a "genericSpaceSystem" could be more disederable rather than NULL */ ss = null; } @@ -553,22 +519,21 @@ public StoRI createFile(String relativePath, StoRIType type) { return stori; } - public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) - throws NamespaceException { + public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) + throws NamespaceException { - return new StoRIImpl(this, rule, relativePath, type); - } + return new StoRIImpl(this, rule, relativePath, type); + } /**************************************************************** * Methods used by StoRI to perform IMPLICIT SPACE RESERVATION *****************************************************************/ /** - * Workaround to manage the DEFAULT SPACE TOKEN defined per Storage Area. This - * workaround simply give the possibility to define a list of DEFAULT SPACE - * TOKENs by the StoRM configuration file. If the token specified into the - * PrepareToPut request belongs to the list of default space token, the space - * file is not used (since it does not exists into the space catalog) and a + * Workaround to manage the DEFAULT SPACE TOKEN defined per Storage Area. This workaround simply + * give the possibility to define a list of DEFAULT SPACE TOKENs by the StoRM configuration file. + * If the token specified into the PrepareToPut request belongs to the list of default space + * token, the space file is not used (since it does not exists into the space catalog) and a * simple allocation of blocks is performed for the file * * Return true if the space token specified is a DEAFULT SPACE TOKENS. 
@@ -577,25 +542,21 @@ public StoRI createFile(String relativePath, StoRIType type, MappingRule rule) private Boolean isVOSAToken(TSpaceToken token) throws NamespaceException { - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - StorageSpaceData ssd = null; try { - ssd = catalog.getStorageSpace(token); + ssd = ReservedSpaceCatalog.getInstance().getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area space information. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area space information. TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log - .error("Unable to get StorageSpaceTO from the DB. DataAccessException: " - + e.getMessage()); + log.error("Unable to get StorageSpaceTO from the DB. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area space information. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area space information. 
DataAccessException : " + + e.getMessage()); } if ((ssd != null) && (ssd.getSpaceType().equals(TSpaceType.VOSPACE))) { @@ -606,15 +567,13 @@ private Boolean isVOSAToken(TSpaceToken token) throws NamespaceException { } public void makeSilhouetteForFile(StoRI stori, TSizeInBytes presumedSize) - throws NamespaceException { + throws NamespaceException { // Check if StoRI is a file if (!(stori.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + stori.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + stori.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + stori.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + stori.getStoRIType()); } // Retrieve the instance of the right Space System @@ -629,23 +588,19 @@ public void makeSilhouetteForFile(StoRI stori, TSizeInBytes presumedSize) } /* - * THis method is synchronized to avoid multiple execution from different - * thread. In such condition, the SpaceData is token at the same time from - * both thread , and then modified and updated. This means that one of the two - * update will be overwritten from the other thread! + * THis method is synchronized to avoid multiple execution from different thread. In such + * condition, the SpaceData is token at the same time from both thread , and then modified and + * updated. This means that one of the two update will be overwritten from the other thread! 
*/ - public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, - TSizeInBytes sizePresumed) + public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, TSizeInBytes sizePresumed) throws NamespaceException, ExpiredSpaceTokenException { // Check if StoRI is a file if (!(file.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); } if (isVOSAToken(token)) { @@ -658,32 +613,30 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, } /** - * Token for Dynamic space reservation specified. Go ahead in the old way, - * look into the space reservation catalog, ... + * Token for Dynamic space reservation specified. Go ahead in the old way, look into the space + * reservation catalog, ... */ // Use of Reserve Space Manager StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(token); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. 
DataAccessException: " - + e.getMessage()); + log.error("Unable to build get StorageSpaceTO. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. DataAccessException : " + + e.getMessage()); } if (spaceData == null) { - throw new NamespaceException( - "No Storage Space stored with this token :" + token); + throw new NamespaceException("No Storage Space stored with this token :" + token); } // Check here if Space Reservation is expired @@ -739,23 +692,21 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, // Create Space StoRI StoRI spaceFile = retrieveSpaceFileByPFN(pfn, totalSize); - if ((!(spaceFile.getLocalFile().exists())) - || (spaceFile.getLocalFile().isDirectory())) { + if ((!(spaceFile.getLocalFile().exists())) || (spaceFile.getLocalFile().isDirectory())) { log.error( - "Unable to get the correct space file!spaceFile does not exsists or it is a directory."); + "Unable to get the correct space file!spaceFile does not exsists or it is a directory."); return; } /** - * Splitting the Space File. In this first version the original space file - * is truncated at the original size minus the new ptp file size presumed, - * and a new space pre_allocation, bound with the new ptp file, is done. + * Splitting the Space File. In this first version the original space file is truncated at the + * original size minus the new ptp file size presumed, and a new space pre_allocation, bound + * with the new ptp file, is done. * - * @todo In the final version, if the new size requested is greater then - * the half of the original space file, the original spacefile is renamed - * to the desired ptp file name and then truncated to the requested size. - * A new space pre_allocation is perfored and bound with the old original - * space file name. 
+ * @todo In the final version, if the new size requested is greater then the half of the + * original space file, the original spacefile is renamed to the desired ptp file name + * and then truncated to the requested size. A new space pre_allocation is perfored and + * bound with the old original space file name. * */ @@ -766,10 +717,8 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, TSizeInBytes newUsedSpaceSize = TSizeInBytes.makeEmpty(); TSizeInBytes newAvailableSpaceSize = TSizeInBytes.makeEmpty(); try { - newUsedSpaceSize = TSizeInBytes - .make(totalSpaceSize.value() - remainingSize, SizeUnit.BYTES); - newAvailableSpaceSize = TSizeInBytes.make(remainingSize, - SizeUnit.BYTES); + newUsedSpaceSize = TSizeInBytes.make(totalSpaceSize.value() - remainingSize); + newAvailableSpaceSize = TSizeInBytes.make(remainingSize); } catch (InvalidTSizeAttributesException ex) { log.error("Unable to create Used Space Size, so use EMPTY size ", ex); } @@ -791,31 +740,27 @@ public synchronized void useSpaceForFile(TSpaceToken token, StoRI file, } /* - * This mehod should be Synchronized? Yes...: From the last internal - * discussion we had, we decide that use the entire available space for a - * single PtP request is not the right behaviour. The correct behaviour is - * that, if the presumed size is not specified as input parameter in the PtP - * request, only a part of the available spacefile is used. The size is the - * minimum between the default file size for the StoRM configuration file and - * the half size of the available spaceFile. TODO + * This method should be Synchronized? Yes...: From the last internal discussion we had, we decide + * that use the entire available space for a single PtP request is not the right behavior. The + * correct behavior is that, if the presumed size is not specified as input parameter in the PtP + * request, only a part of the available space-file is used. 
The size is the minimum between the + * default file size for the StoRM configuration file and the half size of the available + * spaceFile. TODO */ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) - throws NamespaceException, ExpiredSpaceTokenException { + throws NamespaceException, ExpiredSpaceTokenException { // Check if StoRI is a file if (!(file.getStoRIType().equals(StoRIType.FILE))) { - log.error("Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + log.error("Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); throw new NamespaceException( - "Unable to associate a Space to the StoRI with type: " - + file.getStoRIType()); + "Unable to associate a Space to the StoRI with type: " + file.getStoRIType()); } // Get the default space size TSizeInBytes defaultFileSize = null; try { - defaultFileSize = TSizeInBytes - .make(Configuration.getInstance().getFileDefaultSize(), SizeUnit.BYTES); + defaultFileSize = TSizeInBytes.make(StormConfiguration.getInstance().getFileDefaultSize()); } catch (it.grid.storm.srm.types.InvalidTSizeAttributesException e) { log.debug("Invalid size created."); } @@ -823,25 +768,23 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) // Use of Reserve Space Manager StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(token); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(token); } catch (TransferObjectDecodingException e) { log.error( - "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " - + e.getMessage()); + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: " + + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. TransferObjectDecodingException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. 
TransferObjectDecodingException : " + + e.getMessage()); } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: " - + e.getMessage()); + log.error("Unable to build get StorageSpaceTO. DataAccessException: " + e.getMessage()); throw new NamespaceException( - "Error retrieving Storage Area information from Token. DataAccessException : " - + e.getMessage()); + "Error retrieving Storage Area information from Token. DataAccessException : " + + e.getMessage()); } if (spaceData == null) { - throw new NamespaceException( - "No Storage Space stored with this token :" + token); + throw new NamespaceException("No Storage Space stored with this token :" + token); } // Check here if Space Reservation is expired @@ -857,18 +800,11 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) if (isVOSAToken(token)) { // ADD HERE THE LOGIC TO MANAGE DEFAULT SPACE RESERVATION /** - * Check if a DEFAULT SPACE TOKEN is specified. IN that case do nothing - * and create a simple silhouette for the file... - * - * - * TOREMOVE. The space data will contains this information!!! i METADATA - * non venfgono agrgiornati, sara fatta una funzionalita' nella - * getspacemetadatacatalog che in caso di query sul defaulr space token - * vada a vedre la quota sul file system. - * + * Check if a DEFAULT SPACE TOKEN is specified. IN that case do nothing and create a simple + * silhouette for the file... */ - // WARNING, This double check have to be removed, the firs should be fdone - // on teh space type + // WARNING, This double check have to be removed, the first should be done + // on the space type Boolean found = isVOSAToken(token); if (found) { try { @@ -882,8 +818,8 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) } else { /** - * Token for Dynamic space reservation specified. Go ahead in the old way, - * look into the space reservation catalog, ... + * Token for Dynamic space reservation specified. 
Go ahead in the old way, look into the space + * reservation catalog, ... */ // Check here if Space Reservation is expired @@ -909,8 +845,7 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) } else { TSizeInBytes fileSizeToUse = null; try { - fileSizeToUse = TSizeInBytes.make(availableSpaceSize.value() / 2, - SizeUnit.BYTES); + fileSizeToUse = TSizeInBytes.make(availableSpaceSize.value() / 2); } catch (it.grid.storm.srm.types.InvalidTSizeAttributesException e) { log.debug("Invalid size created."); } @@ -925,13 +860,13 @@ public synchronized void useAllSpaceForFile(TSpaceToken token, StoRI file) * Methods used by StoRI to perform EXPLICIT SPACE RESERVATION *****************************************************************/ - public StoRI createSpace(String relativePath, long guaranteedSize, - long totalSize) throws NamespaceException { + public StoRI createSpace(String relativePath, long guaranteedSize, long totalSize) + throws NamespaceException { StoRIType type = StoRIType.SPACE; /* - * TODO Mapping rule should be choosen from the appropriate app-rule - * presents in the namespace.xml file... + * TODO Mapping rule should be chosen from the appropriate approachable-rule presents in the + * namespace.xml file... 
*/ StoRI stori = new StoRIImpl(this, mappingRules.get(0), relativePath, type); @@ -940,54 +875,49 @@ public StoRI createSpace(String relativePath, long guaranteedSize, TSizeInBytes guarSize = TSizeInBytes.makeEmpty(); try { - guarSize = TSizeInBytes.make(guaranteedSize, SizeUnit.BYTES); + guarSize = TSizeInBytes.make(guaranteedSize); } catch (InvalidTSizeAttributesException ex1) { log.error("Unable to create Guaranteed Size, so use EMPTY size ", ex1); } TSizeInBytes totSize = TSizeInBytes.makeEmpty(); try { - totSize = TSizeInBytes.make(totalSize, SizeUnit.BYTES); + totSize = TSizeInBytes.make(totalSize); } catch (InvalidTSizeAttributesException ex2) { log.error("Unable to create Total Size, so use EMPTY size", ex2); } - Space space = createSpace(guarSize, totSize, stori.getLocalFile(), - spaceSystem); + Space space = createSpace(guarSize, totSize, stori.getLocalFile(), spaceSystem); stori.setSpace(space); return stori; } - public StoRI createSpace(String relativePath, long totalsize) - throws NamespaceException { + public StoRI createSpace(String relativePath, long totalsize) throws NamespaceException { StoRI stori = createSpace(relativePath, totalsize, totalsize); return stori; } /** - * This method is used to split the specified spaceFile to the desired PtP - * file. The operations performed depends on the input parameters. If the - * desired new size is minor then the half of the total reserved space size, - * the original space file is truncated to new size : (original size - new PtP - * file presumed size), then a new space_preallocation, of the new PtP file + * This method is used to split the specified spaceFile to the desired PtP file. The operations + * performed depends on the input parameters. 
If the desired new size is minor then the half of + * the total reserved space size, the original space file is truncated to new size : (original + * size - new PtP file presumed size), then a new space_preallocation, of the new PtP file * presumed size, is bound to the requested file. * - * If the presumed size is greater then the half fo the global space - * available, the original space file is renamed to the new PtP file and - * truncated to the presumed size. A new space_preallocation is done to - * recreate the remaining original space file + * If the presumed size is greater then the half fo the global space available, the original space + * file is renamed to the new PtP file and truncated to the presumed size. A new + * space_preallocation is done to recreate the remaining original space file * - * @param spaceOrig StoRI bounds to the original space file. @param file StoRI - * bounds to the desired new PtP file. @param long new PtP file size - * presumed. @returns new Size + * @param spaceOrig StoRI bounds to the original space file. @param file StoRI bounds to the + * desired new PtP file. @param long new PtP file size presumed. @returns new Size */ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) - throws NamespaceException { + throws NamespaceException { // Update Storage Space to new values of size TSizeInBytes newSize = TSizeInBytes.makeEmpty(); @@ -995,13 +925,11 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) // Save the name of the current Space File String spacePFN = spaceOrig.getAbsolutePath(); log.debug("VFS Split: spaceFileName:" + spacePFN); - String relativeSpacePFN = NamespaceUtil - .extractRelativePath(this.getRootPath(), spacePFN); + String relativeSpacePFN = NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN); /** * extractRelativePath seems not working in this case! WHY? 
* - * @todo Because the mapping rule choosen is always the same, for all - * StFNRoot...BUG to FIX.. + * @todo Because the mapping rule chosen is always the same, for all StFNRoot...BUG to FIX.. * */ log.debug("Looking for root:" + this.getRootPath()); @@ -1017,12 +945,12 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) log.debug("VFS Split: relativeSpacePFN:" + relativeSpacePFN); if (failure) { - log.warn( - "SpacePFN does not refer to this VFS root! Something goes wrong in app-rule?"); + log.warn("SpacePFN does not refer to this VFS root! Something goes wrong in app-rule?"); try { - newSize = TSizeInBytes.make(sizePresumed, SizeUnit.BYTES); - file = createSpace(NamespaceUtil.extractRelativePath(this.getRootPath(), - file.getAbsolutePath()), sizePresumed); + newSize = TSizeInBytes.make(sizePresumed); + file = createSpace( + NamespaceUtil.extractRelativePath(this.getRootPath(), file.getAbsolutePath()), + sizePresumed); file.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); @@ -1036,13 +964,12 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) long realSize = spaceOrig.getLocalFile().getSize(); /** - * The next steps depends on the input parameters. Case (1) : new PtP file - * size minor than the half of the available space file. In this case the - * spaceFile is truncated, and a new file is created with the desired - * amount of preallocated blocks. Case(2) : new PtP file size greater than - * the half of the available space file. The spaceFile is renamed to the - * new PtP file, truncated to the presumed size and a new preallocation is - * done bound to the original space file name. + * The next steps depends on the input parameters. Case (1) : new PtP file size minor than the + * half of the available space file. 
In this case the spaceFile is truncated, and a new file + * is created with the desired amount of preallocated blocks. Case(2) : new PtP file size + * greater than the half of the available space file. The spaceFile is renamed to the new PtP + * file, truncated to the presumed size and a new preallocation is done bound to the original + * space file name. * */ @@ -1050,20 +977,19 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) log.debug("SplitSpace Case (1)"); // Truncate - log.debug("SplitSpace: " + spaceOrig.getAbsolutePath() - + " truncating file to size:" + (realSize - sizePresumed)); - spaceOrig.getSpace().getSpaceFile() - .truncateFile((realSize - sizePresumed)); + log.debug("SplitSpace: " + spaceOrig.getAbsolutePath() + " truncating file to size:" + + (realSize - sizePresumed)); + spaceOrig.getSpace().getSpaceFile().truncateFile((realSize - sizePresumed)); // Allocate space for file try { - newSize = TSizeInBytes.make(sizePresumed, SizeUnit.BYTES); - file = createSpace(NamespaceUtil.extractRelativePath( - this.getRootPath(), file.getAbsolutePath()), sizePresumed); + newSize = TSizeInBytes.make(sizePresumed); + file = createSpace( + NamespaceUtil.extractRelativePath(this.getRootPath(), file.getAbsolutePath()), + sizePresumed); file.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { - log.error("Unable to create UNUsed Space Size, so use EMPTY size ", - ex); + log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); } catch (it.grid.storm.filesystem.ReservationException e2) { log.error("Unable to create space into File System"); } @@ -1084,17 +1010,15 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) long remainingSize = realSize - sizePresumed; try { - newSize = TSizeInBytes.make(remainingSize, SizeUnit.BYTES); + newSize = TSizeInBytes.make(remainingSize); // Create a new Space file with the old name and with the size // computed. 
- spaceOrig = createSpace( - NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN), - newSize.value()); + spaceOrig = createSpace(NamespaceUtil.extractRelativePath(this.getRootPath(), spacePFN), + newSize.value()); // Create the new SpaceFile into the file system spaceOrig.getSpace().allot(); } catch (InvalidTSizeAttributesException ex) { - log.error("Unable to create UNUsed Space Size, so use EMPTY size ", - ex); + log.error("Unable to create UNUsed Space Size, so use EMPTY size ", ex); } catch (it.grid.storm.filesystem.ReservationException e2) { log.error("Unable to create space into File System"); } @@ -1110,8 +1034,7 @@ public TSizeInBytes splitSpace(StoRI spaceOrig, StoRI file, long sizePresumed) * Methods used by Space Reservation Manager *************************************************/ - public StoRI createSpace(long guarSize, long totalSize) - throws NamespaceException { + public StoRI createSpace(long guarSize, long totalSize) throws NamespaceException { // retrieve SPACE FILE NAME String relativePath = makeSpaceFilePath(); @@ -1141,8 +1064,7 @@ public StoRI createSpace() throws NamespaceException { TSizeInBytes guarSize = defValue.getDefaultGuaranteedSpaceSize(); // retrieve DEFAULT TOTAL size TSizeInBytes totalSize = defValue.getDefaultTotalSpaceSize(); - StoRI stori = createSpace(relativePath, guarSize.value(), - totalSize.value()); + StoRI stori = createSpace(relativePath, guarSize.value(), totalSize.value()); return stori; } @@ -1163,8 +1085,7 @@ public String toString() { sb.append(" VFS Name : '" + this.aliasName + "'" + sep); sb.append(" VFS root : '" + this.rootPath + "'" + sep); sb.append(" VFS FS driver : '" + this.fsDriver.getName() + "'" + sep); - sb.append( - " VFS Space driver : '" + this.spaceSystemDriver.getName() + "'" + sep); + sb.append(" VFS Space driver : '" + this.spaceSystemDriver.getName() + "'" + sep); sb.append(" -- DEFAULT VALUES --" + sep); sb.append(this.defValue); sb.append(" -- CAPABILITY --" + sep); @@ -1191,51 
+1112,44 @@ private String makeSpaceFilePath() throws NamespaceException { return result; } - private Space createSpace(TSizeInBytes guarSize, TSizeInBytes totalSize, - LocalFile file, SpaceSystem spaceSystem) throws NamespaceException { + private Space createSpace(TSizeInBytes guarSize, TSizeInBytes totalSize, LocalFile file, + SpaceSystem spaceSystem) throws NamespaceException { Space space = null; try { space = new Space(guarSize, totalSize, file, spaceSystem); } catch (InvalidSpaceAttributesException ex3) { log.error("Error while retrieving Space System Driver for VFS ", ex3); - throw new NamespaceException( - "Error while retrieving Space System Driver for VFS ", ex3); + throw new NamespaceException("Error while retrieving Space System Driver for VFS ", ex3); } return space; } - public StorageSpaceData getSpaceByAlias(String desc) - throws NamespaceException { + public StorageSpaceData getSpaceByAlias(String desc) throws NamespaceException { // Retrieve Storage Space from Persistence - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(desc); - return spaceData; + return ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(desc); } - public void storeSpaceByToken(StorageSpaceData spaceData) - throws NamespaceException { + public void storeSpaceByToken(StorageSpaceData spaceData) throws NamespaceException { // Retrieve Storage Space from Persistence - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); try { - catalog.updateStorageSpace(spaceData); + ReservedSpaceCatalog.getInstance().updateStorageSpace(spaceData); } catch (DataAccessException e) { log.error(e.getMessage(), e); } } - public StoRI retrieveSpaceFileByPFN(PFN pfn, long totalSize) - throws NamespaceException { + public StoRI retrieveSpaceFileByPFN(PFN pfn, long totalSize) throws NamespaceException { - NamespaceInterface namespace = NamespaceDirector.getNamespace(); + Namespace namespace = Namespace.getInstance(); 
StoRI stori = namespace.resolveStoRIbyPFN(pfn); stori.setStoRIType(StoRIType.SPACE); - // Create the Space istance + // Create the Space instance log.debug("VFS: retrieveSpace, relative {}-{}", stori.getRelativePath(), stori); StoRI space = createSpace(stori.getRelativeStFN(), totalSize); - // Assign this istance to StoRI created + // Assign this instance to StoRI created stori.setSpace(space.getSpace()); return stori; } @@ -1283,8 +1197,7 @@ public String getStorageAreaAuthzFixed() throws NamespaceException { if (getStorageAreaAuthzType().equals(SAAuthzType.FIXED)) { return saAuthzSourceName; } else { - throw new NamespaceException( - "Required FIXED-AUTHZ, but it is UNDEFINED."); + throw new NamespaceException("Required FIXED-AUTHZ, but it is UNDEFINED."); } } diff --git a/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java b/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java index e2ffcaa23..c5e415361 100644 --- a/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java +++ b/src/main/java/it/grid/storm/namespace/naming/NamespaceUtil.java @@ -18,7 +18,7 @@ import com.google.common.collect.Lists; import it.grid.storm.griduser.VONameMatchingRule; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.VirtualFS; @@ -26,555 +26,552 @@ public class NamespaceUtil { - private static final Logger log = LoggerFactory.getLogger(NamespaceUtil.class); - - /** - * PRIVATE Constructor - */ - private NamespaceUtil() { - - } - - /** - * Compute the distance between two path. Return -1 when the two path are different completely. 
- * - * @param path1 String - * @param path2 String - * @return int - */ - public static int computeDistanceFromPath(String path1, String path2) { - - return (new Path(path1)).distance(new Path(path2)); - } - - /** - * Retrieve all path elements within path - * - * @param path String - * @return Collection - */ - public static List getPathElement(String path) { - - return (new Path(path)).getPathElements(); - } - - /** - * getFileName - * - * @param stfn String - * @return String - */ - public static String getFileName(String stfn) { - - if (stfn != null) { - if (stfn.endsWith(NamingConst.SEPARATOR)) { - return ""; - } else { - Path path = new Path(stfn); - int length = path.getLength(); - if (length > 0) { - PathElement elem = path.getElementAt(length - 1); - return elem.toString(); - } else { - return ""; - } - } - } else { - return ""; - } - } - - /** - * Return all the VFS residing on a specified path (mount-point) - * - * @param mountPointPath - * @return the set - */ - public static Collection getResidentVFS(String mountPointPath) { - - List vfsSet = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFS vfs : vfsSet) { - String vfsRootPath; - boolean enclosed; - - vfsRootPath = vfs.getRootPath(); - enclosed = NamespaceUtil.isEnclosed(mountPointPath, vfsRootPath); - if (!enclosed) { - vfsSet.remove(vfs); - } - } - return vfsSet; - } - - public static String consumeFileName(String file) { - - if (file != null) { - if (file.endsWith(NamingConst.SEPARATOR)) { - return file; - } else { - Path path = new Path(file); - int length = path.getLength(); - if (length > 1) { - return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; - } else { - return Path.PATH_SEPARATOR; - } - } - } else { - return Path.PATH_SEPARATOR; - } - } - - /** - * get - * - * @param stfn String - * @return String - */ - public static String getStFNPath(String stfn) { - - return consumeFileName(stfn); - } - - public static String consumeElement(String stfnPath) { - - 
Path path = new Path(stfnPath); - int length = path.getLength(); - if (length > 1) { - return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; - } else { - return ""; - } - } - - public static String extractRelativePath(String root, String absolute) { - - if (absolute.startsWith(root)) { - Path rootPath = new Path(root); - int rootLength = rootPath.getLength(); - - Path absPath = new Path(absolute); - List elem = Lists.newArrayList(); - - for (int i = 0; i < absPath.getLength(); i++) { - // Why use length and not compare single element? - if (i >= rootLength) { - elem.add(absPath.getElementAt(i)); - } - } - Path result = new Path(elem, false); - - return result.getPath(); - } else { - return absolute; - } - } - - /** - * Is the first path within the second one? - * - * @param root - * @param wrapperCandidate - * @return - */ - public static boolean isEnclosed(String root, String wrapperCandidate) { - - boolean result = false; - Path rootPath = new Path(root); - Path wrapperPath = new Path(wrapperCandidate); - result = rootPath.isEnclosed(wrapperPath); - return result; - } - - /** - * - * @param stfnPath - * @param vfsApproachable - * @return the mapped rule or null if not found - */ - public static MappingRule getWinnerRule(String stfnPath, Collection mappingRules, - Collection vfsApproachable) { - - Preconditions.checkNotNull(stfnPath, "Unable to get winning rule: invalid null stfnPath"); - Preconditions.checkNotNull(mappingRules, - "Unable to get winning rule: invalid null mapping rules"); - Preconditions.checkNotNull(vfsApproachable, - "Unable to get winning rule: invalid null VFS list"); - - if (mappingRules.isEmpty()) { - log.warn("Unable to get winning rule: empty mapping rules"); - return null; - } - - if (vfsApproachable.isEmpty()) { - log.debug("Unable to get winning rule: empty VFS list"); - return null; - } - - log.debug("Searching winner rule for {}", stfnPath); - MappingRule winnerRule = null; - - Vector rules = new Vector(mappingRules); 
- - int minDistance = Integer.MAX_VALUE; - for (MappingRule rule : rules) { - if (isEnclosed(rule.getStFNRoot(), stfnPath) - && vfsApproachable.contains(rule.getMappedFS())) { - int distance = computeDistanceFromPath(rule.getStFNRoot(), stfnPath); - if (distance < minDistance) { - minDistance = distance; - winnerRule = rule; - } - } - } - return winnerRule; - } - - public static MappingRule getWinnerRule(TSURL surl, Collection mappingRules, - Collection vfsApproachable) { - - return getWinnerRule(surl.sfn().stfn().toString(), mappingRules, vfsApproachable); - } - - public static VirtualFS getWinnerVFS(String absolutePath, - Map vfsListByRootPath) throws NamespaceException { - - VirtualFS vfsWinner = null; - int distance = Integer.MAX_VALUE; - for (String vfsRoot : vfsListByRootPath.keySet()) { - int d = computeDistanceFromPath(vfsRoot, absolutePath); - log.debug("Pondering VFS Root '{}' against '{}'. Distance = {}", vfsRoot, absolutePath, d); - if (d < distance) { - boolean enclosed = isEnclosed(vfsRoot, absolutePath); - if (enclosed) { - distance = d; - vfsWinner = vfsListByRootPath.get(vfsRoot); - log.debug("Partial winner is {} (VFS: {})", vfsRoot, vfsWinner.getAliasName()); - } - } - } - if (vfsWinner == null) { - log.error("Unable to found a VFS compatible with path: '{}'", absolutePath); - throw new NamespaceException( - "Unable to found a VFS compatible with path :'" + absolutePath + "'"); - } - return vfsWinner; - } - - public static String resolveVOName(String filename, - Map vfsListByRootPath) throws NamespaceException { - - VirtualFS vfs = getWinnerVFS(filename, vfsListByRootPath); - /* NamespaceException raised if vfs is not found => vfs is not null */ - VONameMatchingRule rule = - vfs.getApproachableRules().get(0).getSubjectRules().getVONameMatchingRule(); - return rule.getVOName(); - } - - /** - * ===================== INNER CLASSES ====================== - */ - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - */ - static class PathElement { - - private final String pathChunk; - - public PathElement(String path) { - - this.pathChunk = path; - } - - public String getPathChunk() { - - return this.pathChunk; - } - - @Override - public int hashCode() { - - return this.pathChunk.hashCode(); - } - - @Override - public boolean equals(Object obj) { - - boolean result = true; - if (!(obj instanceof PathElement)) { - result = false; - } else { - PathElement other = (PathElement) obj; - result = (this.getPathChunk()).equals(other.getPathChunk()); - } - return result; - } - - @Override - public String toString() { - - return pathChunk; - } - } - - /** - * - *

- * Title: - *

- * - *

- * Description: - *

- * - */ - private static class Path { - - private List path; - private static String PATH_SEPARATOR = "/"; - public static final String[] EMPTY_STRING_ARRAY = {}; - public boolean directory; - public boolean absolutePath; - - public Path() { - - this.path = Lists.newArrayList(); - this.directory = false; - this.absolutePath = true; - } - - public Path(List path, boolean absolutePath) { - - this.path = path; - this.directory = false; - this.absolutePath = absolutePath; - } - - public Path(String path) { - - // Factorize path into array of PathElement... - if (path.startsWith(PATH_SEPARATOR)) { - this.absolutePath = true; - } else { - this.absolutePath = false; - } - if (path.endsWith(PATH_SEPARATOR)) { - this.directory = true; - } else { - this.directory = false; - } - - String[] pathElements = factorizePath(path); - if (pathElements != null) { - // ...and build Path - this.path = Lists.newArrayList(); - for (String pathElement : pathElements) { - addPathElement(new PathElement(pathElement)); - } - } - } - - public String[] factorizePath(String path) { - - return toStringArray(path, PATH_SEPARATOR); - } - - public List getPathElements() { - - List result = Lists.newArrayList(); - Iterator scan = path.iterator(); - while (scan.hasNext()) { - PathElement p = scan.next(); - result.add(p.toString()); - } - return result; - } - - private String[] toStringArray(String value, String delim) { - - if (value != null) { - return split(delim, value); - } else { - return EMPTY_STRING_ARRAY; - } - } - - private String[] split(String seperators, String list) { - - return split(seperators, list, false); - } - - private String[] split(String seperators, String list, boolean include) { - - StringTokenizer tokens = new StringTokenizer(list, seperators, include); - String[] result = new String[tokens.countTokens()]; - int i = 0; - while (tokens.hasMoreTokens()) { - result[i++] = tokens.nextToken(); - } - return result; - } - - public String getPath() { - - StringBuilder buf = new 
StringBuilder(); - if (this.absolutePath) { - buf.append(PATH_SEPARATOR); - } - for (Iterator iter = path.iterator(); iter.hasNext();) { - PathElement item = iter.next(); - buf.append(item.getPathChunk()); - if (iter.hasNext()) { - buf.append(PATH_SEPARATOR); - } - } - if (this.directory) { - buf.append(PATH_SEPARATOR); - } - return buf.toString(); - } - - public int getLength() { - - if (path != null) { - return path.size(); - } else { - return 0; - } - } - - /** - * - * @param position int - * @return PathElement - */ - public PathElement getElementAt(int position) { - - if (position < getLength()) { - return this.path.get(position); - } else { - return null; - } - } - - /** - * - * @param obj Object - * @return boolean - */ - @Override - public boolean equals(Object obj) { - - boolean result = true; - if (!(obj instanceof Path)) { - result = false; - } else { - Path other = (Path) obj; - if (other.getLength() != this.getLength()) { - result = false; - } else { - int size = this.getLength(); - for (int i = 0; i < size; i++) { - if (!(this.getElementAt(i)).equals(other.getElementAt(i))) { - result = false; - break; - } - } - } - } - return result; - } - - /** - * - * @param pathChunk PathElement - */ - public void addPathElement(PathElement pathChunk) { - - this.path.add(pathChunk); - } - - /** - * - * @param elements int - * @return Path - */ - public Path getSubPath(int elements) { - - Path result = new Path(); - for (int i = 0; i < elements; i++) { - result.addPathElement(this.getElementAt(i)); - } - return result; - } - - /** - * - * @param wrapperCandidate Path - * @return boolean - */ - public boolean isEnclosed(Path wrapperCandidate) { - - boolean result = false; - if (this.getLength() > wrapperCandidate.getLength()) { - result = false; - } else { - Path other = wrapperCandidate.getSubPath(this.getLength()); - result = other.equals(this); - } - return result; - } - - /** - * - * @param other Path - * @return int - */ - public int distance(Path other) { - - 
int result = -1; - Path a; - Path b; - if (this.getLength() > other.getLength()) { - a = this; - b = other; - } else { - a = other; - b = this; - } - if (b.isEnclosed(a)) { - result = (a.getLength() - b.getLength()); - } else { - result = a.getLength() + b.getLength(); - } - return result; - } - - /** - * - * @return String - */ - @Override - public String toString() { - - StringBuilder buf = new StringBuilder(); - buf.append("["); - for (int i = 0; i < this.getLength(); i++) { - buf.append(" "); - buf.append(this.getElementAt(i).getPathChunk()); - } - buf.append(" ]"); - return buf.toString(); - } - } + private static final Logger log = LoggerFactory.getLogger(NamespaceUtil.class); + + private NamespaceUtil() { + + } + + /** + * Compute the distance between two path. Return -1 when the two path are different completely. + * + * @param path1 String + * @param path2 String + * @return int + */ + public static int computeDistanceFromPath(String path1, String path2) { + + return (new Path(path1)).distance(new Path(path2)); + } + + /** + * Retrieve all path elements within path + * + * @param path String + * @return Collection + */ + public static List getPathElement(String path) { + + return (new Path(path)).getPathElements(); + } + + /** + * getFileName + * + * @param stfn String + * @return String + */ + public static String getFileName(String stfn) { + + if (stfn != null) { + if (stfn.endsWith(NamingConst.SEPARATOR)) { + return ""; + } else { + Path path = new Path(stfn); + int length = path.getLength(); + if (length > 0) { + PathElement elem = path.getElementAt(length - 1); + return elem.toString(); + } else { + return ""; + } + } + } else { + return ""; + } + } + + /** + * Return all the VFS residing on a specified path (mount-point) + * + * @param mountPointPath + * @return the set + */ + public static Collection getResidentVFS(String mountPointPath) { + + List vfsSet = Namespace.getInstance().getAllDefinedVFS(); + for (VirtualFS vfs : vfsSet) { + String 
vfsRootPath; + boolean enclosed; + + vfsRootPath = vfs.getRootPath(); + enclosed = NamespaceUtil.isEnclosed(mountPointPath, vfsRootPath); + if (!enclosed) { + vfsSet.remove(vfs); + } + } + return vfsSet; + } + + public static String consumeFileName(String file) { + + if (file != null) { + if (file.endsWith(NamingConst.SEPARATOR)) { + return file; + } else { + Path path = new Path(file); + int length = path.getLength(); + if (length > 1) { + return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; + } else { + return Path.PATH_SEPARATOR; + } + } + } else { + return Path.PATH_SEPARATOR; + } + } + + /** + * get + * + * @param stfn String + * @return String + */ + public static String getStFNPath(String stfn) { + + return consumeFileName(stfn); + } + + public static String consumeElement(String stfnPath) { + + Path path = new Path(stfnPath); + int length = path.getLength(); + if (length > 1) { + return path.getSubPath(length - 1).getPath() + NamingConst.SEPARATOR; + } else { + return ""; + } + } + + public static String extractRelativePath(String root, String absolute) { + + if (absolute.startsWith(root)) { + Path rootPath = new Path(root); + int rootLength = rootPath.getLength(); + + Path absPath = new Path(absolute); + List elem = Lists.newArrayList(); + + for (int i = 0; i < absPath.getLength(); i++) { + // Why use length and not compare single element? + if (i >= rootLength) { + elem.add(absPath.getElementAt(i)); + } + } + Path result = new Path(elem, false); + + return result.getPath(); + } else { + return absolute; + } + } + + /** + * Is the first path within the second one? 
+ * + * @param root + * @param wrapperCandidate + * @return + */ + public static boolean isEnclosed(String root, String wrapperCandidate) { + + boolean result = false; + Path rootPath = new Path(root); + Path wrapperPath = new Path(wrapperCandidate); + result = rootPath.isEnclosed(wrapperPath); + return result; + } + + /** + * + * @param stfnPath + * @param vfsApproachable + * @return the mapped rule or null if not found + */ + public static MappingRule getWinnerRule(String stfnPath, Collection mappingRules, + Collection vfsApproachable) { + + Preconditions.checkNotNull(stfnPath, "Unable to get winning rule: invalid null stfnPath"); + Preconditions.checkNotNull(mappingRules, + "Unable to get winning rule: invalid null mapping rules"); + Preconditions.checkNotNull(vfsApproachable, + "Unable to get winning rule: invalid null VFS list"); + + if (mappingRules.isEmpty()) { + log.warn("Unable to get winning rule: empty mapping rules"); + return null; + } + + if (vfsApproachable.isEmpty()) { + log.debug("Unable to get winning rule: empty VFS list"); + return null; + } + + log.debug("Searching winner rule for {}", stfnPath); + MappingRule winnerRule = null; + + Vector rules = new Vector(mappingRules); + + int minDistance = Integer.MAX_VALUE; + for (MappingRule rule : rules) { + if (isEnclosed(rule.getStFNRoot(), stfnPath) + && vfsApproachable.contains(rule.getMappedFS())) { + int distance = computeDistanceFromPath(rule.getStFNRoot(), stfnPath); + if (distance < minDistance) { + minDistance = distance; + winnerRule = rule; + } + } + } + return winnerRule; + } + + public static MappingRule getWinnerRule(TSURL surl, Collection mappingRules, + Collection vfsApproachable) { + + return getWinnerRule(surl.sfn().stfn().toString(), mappingRules, vfsApproachable); + } + + public static VirtualFS getWinnerVFS(String absolutePath, + Map vfsListByRootPath) throws NamespaceException { + + VirtualFS vfsWinner = null; + int distance = Integer.MAX_VALUE; + for (String vfsRoot : 
vfsListByRootPath.keySet()) { + int d = computeDistanceFromPath(vfsRoot, absolutePath); + log.debug("Pondering VFS Root '{}' against '{}'. Distance = {}", vfsRoot, absolutePath, d); + if (d < distance) { + boolean enclosed = isEnclosed(vfsRoot, absolutePath); + if (enclosed) { + distance = d; + vfsWinner = vfsListByRootPath.get(vfsRoot); + log.debug("Partial winner is {} (VFS: {})", vfsRoot, vfsWinner.getAliasName()); + } + } + } + if (vfsWinner == null) { + log.error("Unable to found a VFS compatible with path: '{}'", absolutePath); + throw new NamespaceException( + "Unable to found a VFS compatible with path :'" + absolutePath + "'"); + } + return vfsWinner; + } + + public static String resolveVOName(String filename, Map vfsListByRootPath) + throws NamespaceException { + + VirtualFS vfs = getWinnerVFS(filename, vfsListByRootPath); + /* NamespaceException raised if vfs is not found => vfs is not null */ + VONameMatchingRule rule = + vfs.getApproachableRules().get(0).getSubjectRules().getVONameMatchingRule(); + return rule.getVOName(); + } + + /** + * ===================== INNER CLASSES ====================== + */ + + /** + * + *

+ * Title: + *

+ * + *

+ * Description: + *

+ * + */ + static class PathElement { + + private final String pathChunk; + + public PathElement(String path) { + + this.pathChunk = path; + } + + public String getPathChunk() { + + return this.pathChunk; + } + + @Override + public int hashCode() { + + return this.pathChunk.hashCode(); + } + + @Override + public boolean equals(Object obj) { + + boolean result = true; + if (!(obj instanceof PathElement)) { + result = false; + } else { + PathElement other = (PathElement) obj; + result = (this.getPathChunk()).equals(other.getPathChunk()); + } + return result; + } + + @Override + public String toString() { + + return pathChunk; + } + } + + /** + * + *

+ * Title: + *

+ * + *

+ * Description: + *

+ * + */ + private static class Path { + + private List path; + private static String PATH_SEPARATOR = "/"; + public static final String[] EMPTY_STRING_ARRAY = {}; + public boolean directory; + public boolean absolutePath; + + public Path() { + + this.path = Lists.newArrayList(); + this.directory = false; + this.absolutePath = true; + } + + public Path(List path, boolean absolutePath) { + + this.path = path; + this.directory = false; + this.absolutePath = absolutePath; + } + + public Path(String path) { + + // Factorize path into array of PathElement... + if (path.startsWith(PATH_SEPARATOR)) { + this.absolutePath = true; + } else { + this.absolutePath = false; + } + if (path.endsWith(PATH_SEPARATOR)) { + this.directory = true; + } else { + this.directory = false; + } + + String[] pathElements = factorizePath(path); + if (pathElements != null) { + // ...and build Path + this.path = Lists.newArrayList(); + for (String pathElement : pathElements) { + addPathElement(new PathElement(pathElement)); + } + } + } + + public String[] factorizePath(String path) { + + return toStringArray(path, PATH_SEPARATOR); + } + + public List getPathElements() { + + List result = Lists.newArrayList(); + Iterator scan = path.iterator(); + while (scan.hasNext()) { + PathElement p = scan.next(); + result.add(p.toString()); + } + return result; + } + + private String[] toStringArray(String value, String delim) { + + if (value != null) { + return split(delim, value); + } else { + return EMPTY_STRING_ARRAY; + } + } + + private String[] split(String seperators, String list) { + + return split(seperators, list, false); + } + + private String[] split(String seperators, String list, boolean include) { + + StringTokenizer tokens = new StringTokenizer(list, seperators, include); + String[] result = new String[tokens.countTokens()]; + int i = 0; + while (tokens.hasMoreTokens()) { + result[i++] = tokens.nextToken(); + } + return result; + } + + public String getPath() { + + StringBuilder buf = new 
StringBuilder(); + if (this.absolutePath) { + buf.append(PATH_SEPARATOR); + } + for (Iterator iter = path.iterator(); iter.hasNext();) { + PathElement item = iter.next(); + buf.append(item.getPathChunk()); + if (iter.hasNext()) { + buf.append(PATH_SEPARATOR); + } + } + if (this.directory) { + buf.append(PATH_SEPARATOR); + } + return buf.toString(); + } + + public int getLength() { + + if (path != null) { + return path.size(); + } else { + return 0; + } + } + + /** + * + * @param position int + * @return PathElement + */ + public PathElement getElementAt(int position) { + + if (position < getLength()) { + return this.path.get(position); + } else { + return null; + } + } + + /** + * + * @param obj Object + * @return boolean + */ + @Override + public boolean equals(Object obj) { + + boolean result = true; + if (!(obj instanceof Path)) { + result = false; + } else { + Path other = (Path) obj; + if (other.getLength() != this.getLength()) { + result = false; + } else { + int size = this.getLength(); + for (int i = 0; i < size; i++) { + if (!(this.getElementAt(i)).equals(other.getElementAt(i))) { + result = false; + break; + } + } + } + } + return result; + } + + /** + * + * @param pathChunk PathElement + */ + public void addPathElement(PathElement pathChunk) { + + this.path.add(pathChunk); + } + + /** + * + * @param elements int + * @return Path + */ + public Path getSubPath(int elements) { + + Path result = new Path(); + for (int i = 0; i < elements; i++) { + result.addPathElement(this.getElementAt(i)); + } + return result; + } + + /** + * + * @param wrapperCandidate Path + * @return boolean + */ + public boolean isEnclosed(Path wrapperCandidate) { + + boolean result = false; + if (this.getLength() > wrapperCandidate.getLength()) { + result = false; + } else { + Path other = wrapperCandidate.getSubPath(this.getLength()); + result = other.equals(this); + } + return result; + } + + /** + * + * @param other Path + * @return int + */ + public int distance(Path other) { + + 
int result = -1; + Path a; + Path b; + if (this.getLength() > other.getLength()) { + a = this; + b = other; + } else { + a = other; + b = this; + } + if (b.isEnclosed(a)) { + result = (a.getLength() - b.getLength()); + } else { + result = a.getLength() + b.getLength(); + } + return result; + } + + /** + * + * @return String + */ + @Override + public String toString() { + + StringBuilder buf = new StringBuilder(); + buf.append("["); + for (int i = 0; i < this.getLength(); i++) { + buf.append(" "); + buf.append(this.getElementAt(i).getPathChunk()); + } + buf.append(" ]"); + return buf.toString(); + } + } } diff --git a/src/main/java/it/grid/storm/namespace/naming/NamingConst.java b/src/main/java/it/grid/storm/namespace/naming/NamingConst.java index 88d819aef..36d506471 100644 --- a/src/main/java/it/grid/storm/namespace/naming/NamingConst.java +++ b/src/main/java/it/grid/storm/namespace/naming/NamingConst.java @@ -4,7 +4,7 @@ */ package it.grid.storm.namespace.naming; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; public class NamingConst { @@ -25,11 +25,11 @@ public class NamingConst { private static NamingConst instance = new NamingConst(); - private final Configuration config; + private final StormConfiguration config; private NamingConst() { - config = Configuration.getInstance(); + config = StormConfiguration.getInstance(); } public static String getServiceDefaultHost() { diff --git a/src/main/java/it/grid/storm/namespace/naming/SURL.java b/src/main/java/it/grid/storm/namespace/naming/SURL.java index 7a405eef7..260dc7990 100644 --- a/src/main/java/it/grid/storm/namespace/naming/SURL.java +++ b/src/main/java/it/grid/storm/namespace/naming/SURL.java @@ -4,269 +4,240 @@ */ package it.grid.storm.namespace.naming; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.Protocol; - import java.net.URI; -import java.util.ArrayList; +import 
java.util.List; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.Protocol; -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2006 - *

- * - *

- * Company: INFN-CNAF and ICTP/eGrid project - *

- * - * @author Riccardo Zappi - * @version 1.0 - */ public class SURL extends SRMURL { - private static Logger log = NamespaceDirector.getLogger(); - private static ArrayList schemes = new ArrayList(); - - static { - schemes.add("srm"); - } - - public final boolean directory; - - private SURL(final String hostName, final int port, - final String serviceEndpoint, final String queryString) { - - super(Protocol.SRM, hostName, port, serviceEndpoint, queryString); - directory = checkDirectory(queryString); - } - - private SURL(final String hostName, final int port, final String stfn) { - - super(Protocol.SRM, hostName, port, stfn); - directory = checkDirectory(stfn); - } - - // TODO MICHELE USER_SURL debug - public SURL(final String stfn) { - - super(Protocol.SRM, NamingConst.getServiceDefaultHost(), NamingConst - .getServicePort(), stfn); - directory = checkDirectory(stfn); - } - - /** - * Build SURL from the string format. Many control will be executed in the - * string format No other way to create a SURL, if u got a SURL for sure it's - * a valid URI normalized - * - * @param surlString - * String - * @return SURL - */ - public static SURL makeSURLfromString(String surlString) - throws NamespaceException { - - SURL result = null; - - // checks if is a valid uri and normalize - URI uri = null; - try { - uri = URI.create(surlString); - } catch (IllegalArgumentException uriEx) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: URI Except: " + uriEx.getMessage()); - } catch (NullPointerException npe) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: URI Except (null SURL): " + npe.getMessage()); - } - - // Check the scheme - // uri should be not null - String scheme = uri.getScheme(); - if (!(schemes.contains(scheme))) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. 
Reason: unknown scheme '" + scheme + "'"); - } - - // Check the query - String host = uri.getHost(); - if (host == null) { - throw new NamespaceException("SURL_String :'" + surlString - + "' is INVALID. Reason: malformed host!"); - } - int port = uri.getPort(); - String query = uri.getQuery(); - if (query == null || query.trim().equals("")) { - String stfn = uri.getPath(); - result = new SURL(host, port, stfn); - } else { - // The SURL_Str is in a Query FORM. - log.debug(" !! SURL ('" + surlString + "') in a query form (query:'" - + query + "') !!"); - String service = uri.getPath(); - log.debug(" Service endpoint : " + service); - if (checkQuery(query)) { - log.debug(" Query is in a valid form."); - // Extract the StFN from query: - String stfn = extractStFNfromQuery(query); - result = new SURL(host, port, service, stfn); - } else { - log.warn("SURL_String :'" + surlString - + "' is not VALID! (query is in invalid form)"); - throw new NamespaceException("SURL_String :'" + surlString - + "' is not VALID within the Query!"); - } - } - return result; - } - - public String getQueryFormAsString() { - if (this.isNormalFormSURL()) { - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - uriString += "/srm/managerv2?SFN=" + this.path; - return uriString; - } - return this.getSURLAsURIString(); - } - - public String getNormalFormAsString() { - if (this.isQueriedFormSURL()) { - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - uriString += this.getStFN(); - return uriString; - } - return this.getSURLAsURIString(); - } - - public 
boolean isDirectory() { - - return directory; - } - - private boolean checkDirectory(String path) { - - if (path != null && path.endsWith(NamingConst.SEPARATOR)) { - return true; - } else { - return false; - } - } - - /** - * - * Checks if the query string begins with the correct prefix ("SFN=") - * - * @param query - * @return - */ - private static boolean checkQuery(String query) { - - if (query == null) { - log.error("Received a null query to check!"); - return false; - } - return query.startsWith(NamingConst.getServiceSFNQueryPrefix() + "="); - } - - private static String extractStFNfromQuery(String query) { - - String stfn = ""; - if (query == null) { - return stfn; - } else { - int len = query.length(); - if (len < 4) { - return stfn; - } else { - stfn = query.substring(4); - } - } - return stfn; - } - - /** - * get the path and query string e.g. /path/service?SFN=pippo.txt if query - * form e.g /path/pippo.txt if simple form - * - * @return the path and its query string - */ - public String getPathQuery() { - - StringBuilder sb = new StringBuilder(250); - sb.append(getPath()); - if (this.isQueriedFormSURL()) { - sb.append("?"); - sb.append(NamingConst.getServiceSFNQueryPrefix()); - sb.append("="); - sb.append(getQueryString()); - } - return sb.toString(); - } - - public String getSURLAsURIString() { - - String uriString = transfProtocol.getProtocol().getSchema() + "://" - + this.transfProtocol.getAuthority().getServiceHostname(); - if (this.transfProtocol.getAuthority().getServicePort() >= 0) { - uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); - } - if (this.isNormalFormSURL()) { - uriString += this.path; - } else { - uriString += this.getPathQuery(); - } - return uriString; - } - - @Override - public String toString() { - - StringBuilder buffer = new StringBuilder(); - buffer.append(this.transfProtocol.toString()); - buffer.append(this.getPathQuery()); - return buffer.toString(); - } - - @Override - public int hashCode() { - - int 
result = super.hashCode(); - result += 37 * schemes.hashCode() + 63 * (directory ? 1 : 0); - return result; - } - - /* - * - */ - @Override - public boolean equals(Object obj) { - - if (!super.equals(obj)) - return false; - if (!(obj instanceof SURL)) - return false; - SURL other = (SURL) obj; - if (directory != other.directory) - return false; - return true; - } + private static Logger log = LoggerFactory.getLogger(SURL.class); + private static List schemes = Lists.newArrayList("srm"); + + public final boolean directory; + + private SURL(final String hostName, final int port, final String serviceEndpoint, + final String queryString) { + + super(Protocol.SRM, hostName, port, serviceEndpoint, queryString); + directory = checkDirectory(queryString); + } + + private SURL(final String hostName, final int port, final String stfn) { + + super(Protocol.SRM, hostName, port, stfn); + directory = checkDirectory(stfn); + } + + public SURL(final String stfn) { + + super(Protocol.SRM, NamingConst.getServiceDefaultHost(), NamingConst.getServicePort(), stfn); + directory = checkDirectory(stfn); + } + + /** + * Build SURL from the string format. Many control will be executed in the string format No other + * way to create a SURL, if u got a SURL for sure it's a valid URI normalized + * + * @param surlString String + * @return SURL + */ + public static SURL makeSURLfromString(String surlString) throws NamespaceException { + + SURL result = null; + + // checks if is a valid uri and normalize + URI uri = null; + try { + uri = URI.create(surlString); + } catch (IllegalArgumentException uriEx) { + throw new NamespaceException("SURL_String :'" + surlString + + "' is INVALID. Reason: URI Except: " + uriEx.getMessage()); + } catch (NullPointerException npe) { + throw new NamespaceException("SURL_String :'" + surlString + + "' is INVALID. 
Reason: URI Except (null SURL): " + npe.getMessage()); + } + + // Check the scheme + // uri should be not null + String scheme = uri.getScheme(); + if (!(schemes.contains(scheme))) { + throw new NamespaceException( + "SURL_String :'" + surlString + "' is INVALID. Reason: unknown scheme '" + scheme + "'"); + } + + // Check the query + String host = uri.getHost(); + if (host == null) { + throw new NamespaceException( + "SURL_String :'" + surlString + "' is INVALID. Reason: malformed host!"); + } + int port = uri.getPort(); + String query = uri.getQuery(); + if (query == null || query.trim().equals("")) { + String stfn = uri.getPath(); + result = new SURL(host, port, stfn); + } else { + // The SURL_Str is in a Query FORM. + log.debug(" !! SURL ('" + surlString + "') in a query form (query:'" + query + "') !!"); + String service = uri.getPath(); + log.debug(" Service endpoint : " + service); + if (checkQuery(query)) { + log.debug(" Query is in a valid form."); + // Extract the StFN from query: + String stfn = extractStFNfromQuery(query); + result = new SURL(host, port, service, stfn); + } else { + log.warn("SURL_String :'" + surlString + "' is not VALID! 
(query is in invalid form)"); + throw new NamespaceException( + "SURL_String :'" + surlString + "' is not VALID within the Query!"); + } + } + return result; + } + + public String getQueryFormAsString() { + if (this.isNormalFormSURL()) { + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); + } + uriString += "/srm/managerv2?SFN=" + this.path; + return uriString; + } + return this.getSURLAsURIString(); + } + + public String getNormalFormAsString() { + if (this.isQueriedFormSURL()) { + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); + } + uriString += this.getStFN(); + return uriString; + } + return this.getSURLAsURIString(); + } + + public boolean isDirectory() { + + return directory; + } + + private boolean checkDirectory(String path) { + + if (path != null && path.endsWith(NamingConst.SEPARATOR)) { + return true; + } else { + return false; + } + } + + /** + * + * Checks if the query string begins with the correct prefix ("SFN=") + * + * @param query + * @return + */ + private static boolean checkQuery(String query) { + + if (query == null) { + log.error("Received a null query to check!"); + return false; + } + return query.startsWith(NamingConst.getServiceSFNQueryPrefix() + "="); + } + + private static String extractStFNfromQuery(String query) { + + String stfn = ""; + if (query == null) { + return stfn; + } else { + int len = query.length(); + if (len < 4) { + return stfn; + } else { + stfn = query.substring(4); + } + } + return stfn; + } + + /** + * get the path and query string e.g. 
/path/service?SFN=pippo.txt if query form e.g + * /path/pippo.txt if simple form + * + * @return the path and its query string + */ + public String getPathQuery() { + + StringBuilder sb = new StringBuilder(250); + sb.append(getPath()); + if (this.isQueriedFormSURL()) { + sb.append("?"); + sb.append(NamingConst.getServiceSFNQueryPrefix()); + sb.append("="); + sb.append(getQueryString()); + } + return sb.toString(); + } + + public String getSURLAsURIString() { + + String uriString = transfProtocol.getProtocol().getSchema() + "://" + + this.transfProtocol.getAuthority().getServiceHostname(); + if (this.transfProtocol.getAuthority().getServicePort() >= 0) { + uriString += ":" + this.transfProtocol.getAuthority().getServicePort(); + } + if (this.isNormalFormSURL()) { + uriString += this.path; + } else { + uriString += this.getPathQuery(); + } + return uriString; + } + + @Override + public String toString() { + + StringBuilder buffer = new StringBuilder(); + buffer.append(this.transfProtocol.toString()); + buffer.append(this.getPathQuery()); + return buffer.toString(); + } + + @Override + public int hashCode() { + + int result = super.hashCode(); + result += 37 * schemes.hashCode() + 63 * (directory ? 
1 : 0); + return result; + } + + /* + * + */ + @Override + public boolean equals(Object obj) { + + if (!super.equals(obj)) + return false; + if (!(obj instanceof SURL)) + return false; + SURL other = (SURL) obj; + if (directory != other.directory) + return false; + return true; + } } diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java index d2b6823ba..45a27801f 100644 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java +++ b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResource.java @@ -17,15 +17,12 @@ import com.google.common.collect.Maps; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.model.SAInfo; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.namespace.remote.Constants; -/** - * @author Michele Dibenedetto - */ @Path("/" + Constants.RESOURCE + "/" + Constants.VERSION) public class VirtualFSResource { @@ -40,7 +37,7 @@ public class VirtualFSResource { public Map listVFS() { log.debug("Serving VFS resource listing"); - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); + List vfsCollection = Namespace.getInstance().getAllDefinedVFS(); Map output = Maps.newHashMap(); for (VirtualFS vfs : vfsCollection) { diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java deleted file mode 100644 index 42d0688f6..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_0.java +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace.remote.resource; - -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; - -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.namespace.remote.Constants; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_0) -public class VirtualFSResourceCompat_1_0 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_0.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - String vfsListString = ""; - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFS vfs : vfsCollection) { - if (!vfsListString.equals("")) { - vfsListString += Constants.VFS_LIST_SEPARATOR; - } - try { - vfsListString += encodeVFS(vfs); - } catch (NamespaceException e) { - log.error("Unable to encode the virtual file system. 
NamespaceException : {}", - e.getMessage()); - throw new WebApplicationException(Response.status(INTERNAL_SERVER_ERROR) - .entity("Unable to encode the virtual file system") - .build()); - } - } - return vfsListString; - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFS vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java deleted file mode 100644 index 8873f373a..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_1.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace.remote.resource; - -import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; - -import java.util.Iterator; -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.namespace.remote.Constants; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_1) -public class VirtualFSResourceCompat_1_1 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_1.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - String vfsListString = ""; - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - for (VirtualFS vfs : vfsCollection) { - if (!vfsListString.equals("")) { - vfsListString += Constants.VFS_LIST_SEPARATOR; - } - try { - vfsListString += encodeVFS(vfs); - } catch (NamespaceException e) { - log.error("Unable to encode the virtual file system. 
NamespaceException : {}", - e.getMessage()); - throw new WebApplicationException(Response.status(INTERNAL_SERVER_ERROR) - .entity("Unable to encode the virtual file system") - .build()); - } - } - return vfsListString; - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFS vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - Iterator protocolsIterator = - vfs.getCapabilities().getAllManagedProtocols().iterator(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - } - while (protocolsIterator.hasNext()) { - vfsEncoded += protocolsIterator.next().getSchema(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_SEPARATOR; - } - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java deleted file mode 100644 index fcbf651d7..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_2.java +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace.remote.resource; - -import static it.grid.storm.namespace.remote.Constants.VFS_LIST_SEPARATOR; -import static java.lang.String.join; -import static java.lang.String.valueOf; - -import java.util.Iterator; -import java.util.List; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.MappingRule; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.namespace.remote.Constants; -import it.grid.storm.namespace.remote.Constants.HttpPerms; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_2) -public class VirtualFSResourceCompat_1_2 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_2.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_KEY) - @Produces("text/plain") - public String listVFS() { - - log.info("Serving VFS resource listing"); - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - List encodedVFSs = Lists.newArrayList(); - vfsCollection.forEach(vfs -> { - try { - encodedVFSs.add(encodeVFS(vfs)); - } catch (NamespaceException e) { - log.error( - "Unable to encode the virtual file system. 
NamespaceException : {}", e.getMessage()); - throw new WebApplicationException( - Response.serverError().entity("Unable to encode the virtual file system").build()); - } - }); - return join(valueOf(VFS_LIST_SEPARATOR), encodedVFSs); - } - - /** - * @param vfs - * @return - * @throws NamespaceException - */ - private String encodeVFS(VirtualFS vfs) throws NamespaceException { - - String vfsEncoded = Constants.VFS_NAME_KEY + Constants.VFS_FIELD_MATCHER + vfs.getAliasName(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ROOT_KEY + Constants.VFS_FIELD_MATCHER + vfs.getRootPath(); - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - List mappingRules = vfs.getMappingRules(); - vfsEncoded += Constants.VFS_STFN_ROOT_KEY + Constants.VFS_FIELD_MATCHER; - for (int i = 0; i < mappingRules.size(); i++) { - MappingRule mappingRule = mappingRules.get(i); - if (i > 0) { - vfsEncoded += Constants.VFS_STFN_ROOT_SEPARATOR; - } - vfsEncoded += mappingRule.getStFNRoot(); - } - Iterator protocolsIterator = - vfs.getCapabilities().getAllManagedProtocols().iterator(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - } - while (protocolsIterator.hasNext()) { - vfsEncoded += protocolsIterator.next().getSchema(); - if (protocolsIterator.hasNext()) { - vfsEncoded += Constants.VFS_ENABLED_PROTOCOLS_SEPARATOR; - } - } - vfsEncoded += Constants.VFS_FIELD_SEPARATOR; - vfsEncoded += Constants.VFS_ANONYMOUS_PERMS_KEY; - vfsEncoded += Constants.VFS_FIELD_MATCHER; - if (vfs.isHttpWorldReadable()) { - if (vfs.isApproachableByAnonymous()) { - vfsEncoded += HttpPerms.READWRITE; - } else { - vfsEncoded += HttpPerms.READ; - } - } else { - vfsEncoded += HttpPerms.NOREAD; - } - return vfsEncoded; - } -} diff --git a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_3.java 
b/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_3.java deleted file mode 100644 index bc0c96221..000000000 --- a/src/main/java/it/grid/storm/namespace/remote/resource/VirtualFSResourceCompat_1_3.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.namespace.remote.resource; - -import java.util.List; -import java.util.Map; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Maps; - -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.SAInfoV13; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.namespace.remote.Constants; - -/** - * @author Michele Dibenedetto - */ -@Path("/" + Constants.RESOURCE + "/" + Constants.VERSION_1_3) -public class VirtualFSResourceCompat_1_3 { - - private static final Logger log = LoggerFactory.getLogger(VirtualFSResourceCompat_1_3.class); - - /** - * @return - */ - @GET - @Path("/" + Constants.LIST_ALL_VFS) - @Produces(MediaType.APPLICATION_JSON) - public Map listVFS() { - - log.debug("Serving VFS resource listing"); - List vfsCollection = NamespaceDirector.getNamespace().getAllDefinedVFS(); - Map output = Maps.newHashMap(); - - for (VirtualFS vfs : vfsCollection) { - try { - output.put(vfs.getAliasName(), SAInfoV13.buildFromVFS(vfs)); - } catch (NamespaceException e) { - log.error(e.getMessage()); - } - } - - return output; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/DAOFactory.java b/src/main/java/it/grid/storm/persistence/DAOFactory.java deleted file mode 100644 index d71307803..000000000 --- a/src/main/java/it/grid/storm/persistence/DAOFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 
(c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -/* - * (c)2004 INFN / ICTP-eGrid This file can be distributed and/or modified under - * the terms of the INFN Software License. For a copy of the licence please - * visit http://www.cnaf.infn.it/license.html - */ - -package it.grid.storm.persistence; - -import it.grid.storm.persistence.dao.PtGChunkDAO; -import it.grid.storm.persistence.dao.PtPChunkDAO; -import it.grid.storm.persistence.dao.RequestSummaryDAO; -import it.grid.storm.persistence.dao.StorageAreaDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; - -/** - * Returns an implementation of all Catalog interfaces. - * - * @author Riccardo Zappi - riccardo.zappi AT cnaf.infn.it - * @version $Id: DAOFactory.java,v 1.3 2005/10/22 15:09:40 rzappi Exp $ - */ -public interface DAOFactory { - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. 
- * - * @throws DataAccessException - * @return StorageSpaceDAO - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException; - - public TapeRecallDAO getTapeRecallDAO(); - - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException; - - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException; - - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException; - - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException; - - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException; - -} diff --git a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java b/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java deleted file mode 100644 index 313370250..000000000 --- a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java +++ /dev/null @@ -1,16 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence; - -import java.sql.Connection; -import it.grid.storm.persistence.exceptions.PersistenceException; - -public interface DataSourceConnectionFactory { - - public Connection borrowConnection() throws PersistenceException; - - public void giveBackConnection(Connection con) throws PersistenceException; - -} diff --git a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java b/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java deleted file mode 100644 index 77e6c6e3f..000000000 --- a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.persistence.dao.PtGChunkDAO; -import it.grid.storm.persistence.dao.PtPChunkDAO; -import it.grid.storm.persistence.dao.RequestSummaryDAO; -import it.grid.storm.persistence.dao.StorageAreaDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; -import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; - -public class MySqlDAOFactory implements DAOFactory { - - public static final String factoryName = "JDBC - MySQL DAO Factory"; - - private static final Logger log = LoggerFactory - .getLogger(MySqlDAOFactory.class); - - private static MySqlDAOFactory factory = new MySqlDAOFactory(); - - /** - * - */ - private MySqlDAOFactory() { - log.info("DAO factory: {}", MySqlDAOFactory.factoryName); - } - - public static MySqlDAOFactory getInstance() { - - return MySqlDAOFactory.factory; - } - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. - * - * @throws DataAccessException - * @return StorageSpaceDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException { - - return new StorageSpaceDAOMySql(); - } - - /** - * Returns an implementation of TapeRecallCatalog, specific to a particular - * datastore. 
- * - * @throws DataAccessException - * @return TapeReallDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public TapeRecallDAO getTapeRecallDAO() { - - return new TapeRecallDAOMySql(); - } - - /** - * @return String - */ - @Override - public String toString() { - - return MySqlDAOFactory.factoryName; - } - - - /** - * getPtGChunkDAO - * - * @return PtGChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getPtPChunkDAO - * - * @return PtPChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getRequestSummaryDAO - * - * @return RequestSummaryDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException { - - return null; - } - - /** - * getStorageAreaDAO - * - * @return StorageAreaDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException { - - return null; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.DAOFactory#getTapeRecallDAO(boolean) - */ - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException { - - return new TapeRecallDAOMySql(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java b/src/main/java/it/grid/storm/persistence/PersistenceDirector.java deleted file mode 100644 index 6d1936e96..000000000 --- a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence; - -import it.grid.storm.config.Configuration; -import it.grid.storm.persistence.exceptions.PersistenceException; -import it.grid.storm.persistence.util.db.DBConnectionPool; -import it.grid.storm.persistence.util.db.DataBaseStrategy; -import it.grid.storm.persistence.util.db.Databases; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PersistenceDirector { - - private static final Logger log = LoggerFactory.getLogger("persistence"); - private static Configuration config; - private static DataBaseStrategy dbMan; - private static DAOFactory daoFactory; - private static DataSourceConnectionFactory connFactory; - - static { - log.trace("Initializing Persistence Director..."); - config = Configuration.getInstance(); - dbMan = Databases.getDataBaseStrategy("mysql"); - daoFactory = MySqlDAOFactory.getInstance(); - - int maxActive = config.getBEPersistencePoolDBMaxActive(); - int maxWait = config.getBEPersistencePoolDBMaxWait(); - - log.debug("Datasource connection string = {}", dbMan.getConnectionString()); - log.debug("Pool Max Active = {}", maxActive); - log.debug("Pool Max Wait = {}", maxWait); - - try { - DBConnectionPool.initPool(dbMan, maxActive, maxWait); - connFactory = DBConnectionPool.getPoolInstance(); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - System.exit(1); - } - } - - public static DAOFactory getDAOFactory() { - - return daoFactory; - } - - public static DataBaseStrategy getDataBase() { - - return dbMan; - } - - public static DataSourceConnectionFactory getConnectionFactory() { - - return connFactory; - } - - public static Logger getLogger() { - - return log; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/DirOptionConverter.java rename to 
src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java index 4229ed7b7..3c6c47a92 100644 --- a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java +++ b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.converter; /** * Package private class that translates between DPM flag for TDirOption and diff --git a/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java new file mode 100644 index 000000000..e1c808f89 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java @@ -0,0 +1,59 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.StormConfiguration; + +/** + * Class that handles DB representation of a pinLifetime as expressed by a TLifetimeInSeconds + * objects; in particular it takes care of protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class FileLifetimeConverter { + + private static FileLifetimeConverter stc = new FileLifetimeConverter(); + + private FileLifetimeConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static FileLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. 
+ */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return Long.valueOf(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getFileLifetimeDefault() Configuration class method. + */ + public long toStoRM(int s) { + + if (s <= 0) + return StormConfiguration.getInstance().getFileLifetimeDefault(); + return Integer.valueOf(s).longValue(); + } +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java new file mode 100644 index 000000000..ef1845c62 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java @@ -0,0 +1,91 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import java.util.Iterator; +import java.util.Map; + +import com.google.common.collect.Maps; + +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.srm.types.TFileStorageType; + +/** + * Package private auxiliary class used to convert between DB raw data and StoRM object model + * representation of TFileStorageType. 
+ * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class FileStorageTypeConverter { + + private Map DBtoSTORM = Maps.newHashMap(); + private Map STORMtoDB = Maps.newHashMap(); + + private static FileStorageTypeConverter c = new FileStorageTypeConverter(); + + /** + * Private constructor that fills in the conversion tables; + * + * V - VOLATILE P - PERMANENT D - DURABLE + */ + private FileStorageTypeConverter() { + + DBtoSTORM.put("V", TFileStorageType.VOLATILE); + DBtoSTORM.put("P", TFileStorageType.PERMANENT); + DBtoSTORM.put("D", TFileStorageType.DURABLE); + String aux; + for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { + aux = i.next(); + STORMtoDB.put(DBtoSTORM.get(aux), aux); + } + } + + /** + * Method that returns the only instance of FileStorageTypeConverter. + */ + public static FileStorageTypeConverter getInstance() { + + return c; + } + + /** + * Method that returns the String used in the DB to represent the given TFileStorageType. The + * empty String "" is returned if no match is found. + */ + public String toDB(TFileStorageType fst) { + + String aux = (String) STORMtoDB.get(fst); + if (aux == null) + return ""; + return aux; + } + + /** + * Method that returns the TFileStorageType used by StoRM to represent the supplied String + * representation in the DB. A configured default TFileStorageType is returned in case no + * corresponding StoRM type is found. TFileStorageType.EMPTY is returned if there are + * configuration errors. + */ + public TFileStorageType toSTORM(String s) { + + TFileStorageType aux = DBtoSTORM.get(s); + if (aux == null) + // This case is that the String s is different from V,P or D. + aux = DBtoSTORM.get(StormConfiguration.getInstance().getDefaultFileStorageType()); + if (aux == null) + // This case should never happen, but in case we prefer ponder PERMANENT. 
+ return TFileStorageType.EMPTY; + else + return aux; + } + + public String toString() { + + return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM + "\nSTORMtoDB map:" + STORMtoDB; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java new file mode 100644 index 000000000..f8b53b97b --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import static it.grid.storm.srm.types.TOverwriteMode.ALWAYS; +import static it.grid.storm.srm.types.TOverwriteMode.NEVER; +import static it.grid.storm.srm.types.TOverwriteMode.WHENFILESAREDIFFERENT; + +import java.util.Map; + +import com.google.common.collect.Maps; + +import it.grid.storm.srm.types.TOverwriteMode; + +/** + * Package private auxiliary class used to convert between DB and StoRM object model representation + * of TOverwriteMode. 
+ */ +public class OverwriteModeConverter { + + private static Map DBtoSTORM = Maps.newHashMap(); + private static Map STORMtoDB = Maps.newHashMap(); + + static { + + STORMtoDB.put(NEVER, "N"); + STORMtoDB.put(ALWAYS, "A"); + STORMtoDB.put(WHENFILESAREDIFFERENT, "D"); + + DBtoSTORM.put("N", NEVER); + DBtoSTORM.put("A", ALWAYS); + DBtoSTORM.put("D", WHENFILESAREDIFFERENT); + } + + public static String toDB(TOverwriteMode om) { + + if (STORMtoDB.containsKey(om)) { + return STORMtoDB.get(om); + } + return "N"; + } + + public static TOverwriteMode toSTORM(String s) { + + if (DBtoSTORM.containsKey(s)) { + return DBtoSTORM.get(s); + } + return TOverwriteMode.EMPTY; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java new file mode 100644 index 000000000..0332dfe4f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java @@ -0,0 +1,74 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.StormConfiguration; + +/** + * Class that handles DB representation of a TLifetimeInSeconds, in particular it takes care of + * protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class PinLifetimeConverter { + + private static PinLifetimeConverter stc = new PinLifetimeConverter(); + + private PinLifetimeConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static PinLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. + */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return Long.valueOf(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getPinLifetimeMinimum() Configuration class method. + */ + public long toStoRM(int s) { + + if (s == 0) { + return StormConfiguration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return StormConfiguration.getInstance().getPinLifetimeDefault(); + } + return Integer.valueOf(s).longValue(); + } + + public long toStoRM(long s) { + + if (s == 0) { + return StormConfiguration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return StormConfiguration.getInstance().getPinLifetimeDefault(); + } + return s; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java similarity index 82% rename from src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java rename to src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java index aaaea597e..272c0d007 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java +++ 
b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java @@ -2,12 +2,14 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; -import static it.grid.storm.catalogs.RequestSummaryDataTO.BOL_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.COPY_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTG_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTP_REQUEST_TYPE; + +package it.grid.storm.persistence.converter; + +import static it.grid.storm.persistence.model.RequestSummaryDataTO.BOL_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.COPY_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTG_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTP_REQUEST_TYPE; import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; import static it.grid.storm.srm.types.TRequestType.COPY; import static it.grid.storm.srm.types.TRequestType.EMPTY; @@ -24,7 +26,7 @@ * Package private auxiliary class used to convert between DB and StoRM object model representation * of the request type. */ -class RequestTypeConverter { +public class RequestTypeConverter { private Map dbToStorm = Maps.newHashMap(); private Map stormToDb = Maps.newHashMap(); diff --git a/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java new file mode 100644 index 000000000..6b54db7e6 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java @@ -0,0 +1,56 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSizeInBytes; + +/** + * Class that handles DB representation of a TSizeInBytes, in particular it takes care of the NULL + * logic of the DB: 0/null are used to mean an empty field, whereas StoRM Object model uses the type + * TSizeInBytes.makeEmpty(); moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ + * important to use this converter! + * + * @author EGRID ICTP + * @version 2.0 + * @date July 2005 + */ +public class SizeInBytesIntConverter { + + private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); + + private SizeInBytesIntConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static SizeInBytesIntConverter getInstance() { + + return stc; + } + + /** + * Method that transaltes the Empty TSizeInBytes into the empty representation of DB which is 0. + * Any other int is left as is. + */ + public long toDB(long s) { + + if (s == TSizeInBytes.makeEmpty().value()) + return 0; + return s; + } + + /** + * Method that returns the int as is, except if it is 0 which DB interprests as empty field: in + * that case it then returns the Empty TSizeInBytes int representation. + */ + public long toStoRM(long s) { + + if (s == 0) + return TSizeInBytes.makeEmpty().value(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java new file mode 100644 index 000000000..00295273d --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java @@ -0,0 +1,42 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSpaceToken; + +/** + * Class that handles DPM DB representation of a SpaceToken, in particular it takes care of the + * NULL/EMPTY logic of DPM. In particular DPM uses the empty string "" as meaning the absence of a + * value for the field, whereas StoRM accepts it as a valid String with which to create a + * TSpaceToken; moreover StoRM uses an Empty TSpaceToken type. + * + * @author EGRID ICTP + * @version 1.0 + * @date June 2005 + */ +public class SpaceTokenStringConverter { + + /** + * Method that translates StoRM Empty TSpaceToken String representation into DPM empty + * representation; all other Strings are left as are. + */ + public static String toDB(String s) { + + if (s.equals(TSpaceToken.makeEmpty().toString())) + return ""; + return s; + } + + /** + * Method that translates DPM String representing an Empty TSpaceToken into StoRM representation; + * any other String is left as is. + */ + public static String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TSpaceToken.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java similarity index 99% rename from src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java rename to src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java index 0d4415082..fb5490e73 100644 --- a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java +++ b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.converter; import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHENTICATION_FAILURE; diff --git a/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java new file mode 100644 index 000000000..f8deebbab --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java @@ -0,0 +1,55 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TTURL; + +/** + * Class that handles DPM DB representation of a TTURL, in particular it takes care of the + * NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty field, whereas StoRM uses the + * type TTURL.makeEmpty(); in particular StoRM converts an empty String or a null to an Empty TTURL! + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2006 + */ +public class TURLConverter { + + private static TURLConverter stc = new TURLConverter(); // only instance + + private TURLConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static TURLConverter getInstance() { + + return stc; + } + + /** + * Method that transaltes the Empty TTURL into the empty representation of DPM which is a null! + * Any other String is left as is. + */ + public String toDB(String s) { + + if (s.equals(TTURL.makeEmpty().toString())) + return null; + return s; + } + + /** + * Method that translates DPMs "" or null String as the Empty TTURL String representation. Any + * other String is left as is. 
+ */ + public String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TTURL.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java new file mode 100644 index 000000000..7eefa8629 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.converter; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.namespace.model.Protocol; + +import java.util.Iterator; +import java.util.List; +import java.util.ArrayList; + +/** + * Package private auxiliary class used to convert between the DB raw data representation and StoRM + * s Object model list of transfer protocols. + * + */ + +public class TransferProtocolListConverter { + + /** + * Method that returns a List of Uppercase Strings used in the DB to represent the given + * TURLPrefix. An empty List is returned in case the conversion does not succeed, a null + * TURLPrefix is supplied, or its size is 0. + */ + public static List toDB(TURLPrefix turlPrefix) { + + List result = new ArrayList(); + Protocol protocol; + for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it.hasNext();) { + protocol = it.next(); + result.add(protocol.getSchema()); + } + return result; + } + + /** + * Method that returns a TURLPrefix of transfer protocol. If the translation cannot take place, a + * TURLPrefix of size 0 is returned. Likewise if a null List is supplied. 
+ */ + public static TURLPrefix toSTORM(List listOfProtocol) { + + TURLPrefix turlPrefix = new TURLPrefix(); + Protocol protocol = null; + for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { + protocol = Protocol.getProtocol(i.next()); + if (!(protocol.equals(Protocol.UNKNOWN))) + turlPrefix.addProtocol(protocol); + } + return turlPrefix; + } +} diff --git a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java index 733bc176f..85d80874d 100644 --- a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java @@ -2,12 +2,8 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.exceptions.PersistenceException; +package it.grid.storm.persistence.dao; import java.sql.Connection; import java.sql.ResultSet; @@ -17,156 +13,71 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.persistence.pool.impl.DefaultDatabaseConnectionPool; + public abstract class AbstractDAO { - private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); - - private DataSourceConnectionFactory connFactory; - - public AbstractDAO() { - connFactory = PersistenceDirector.getConnectionFactory(); - } - - protected void commit(Connection conn) { - - try { - conn.commit(); - conn.setAutoCommit(true); - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } - - protected Connection getConnection() throws DataAccessException { - - Connection conn = null; - try { - conn = connFactory.borrowConnection(); - } catch (PersistenceException ex) { - throw new DataAccessException(ex); - } - return conn; - } - 
- protected Statement getStatement(Connection conn) throws DataAccessException { - - Statement stat = null; - if (conn == null) { - throw new DataAccessException( - "No Connection available to create a Statement"); - } else { - try { - stat = conn.createStatement(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - return stat; - } - - /** - * Release a connection Accessor method. - * - * @param resultSet - * ResultSet - * @param statement - * Statement - * @param connection - * Connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet resultSet, Statement statement, - Connection connection) throws DataAccessException { - - // Release the ResultSet - closeResultSet(resultSet); - - // Close the statement - closeStatement(statement); - - // Release the connection - closeConnection(connection); - } - - /** - * Release a connection and a list of statements and result sets Accessor - * method. - * - * @param resultSets - * @param statements - * @param connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet[] resultSets, - Statement[] statements, Connection connection) throws DataAccessException { - - // Release the ResultSets - if (resultSets != null) { - for (ResultSet resultSet : resultSets) { - closeResultSet(resultSet); - } - } - // Close the statement - if (statements != null) { - for (Statement statement : statements) { - closeStatement(statement); - } - } - // Release the connection - closeConnection(connection); - } - - private void closeResultSet(ResultSet resultSet) throws DataAccessException { - - if (resultSet != null) { - try { - resultSet.close(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void closeStatement(Statement statement) throws DataAccessException { - - if (statement != null) { - try { - statement.close(); - } catch (SQLException e) { - 
log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void closeConnection(Connection connection) - throws DataAccessException { - - if (connection != null) { - try { - connFactory.giveBackConnection(connection); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - /** - * @param conn - */ - protected void rollback(Connection conn) { - - try { - - conn.rollback(); - conn.setAutoCommit(true); - - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } - -} + private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); + + private final DefaultDatabaseConnectionPool connectionPool; + + public AbstractDAO(DefaultDatabaseConnectionPool connectionPool) { + + this.connectionPool = connectionPool; + } + + protected Connection getConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(true); + return con; + } + + protected Connection getManagedConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(false); + return con; + } + + protected void closeResultSet(ResultSet resultSet) { + + try { + if (resultSet != null) { + resultSet.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeStatement(Statement statement) { + + try { + if (statement != null) { + statement.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeConnection(Connection connection) { + + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + private void handleSQLException(SQLException e) { + + log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", e.getMessage(), e.getSQLState(), + e.getErrorCode(), e); + e.printStackTrace(); + + } +} \ No newline at end of file diff --git 
a/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java new file mode 100644 index 000000000..feb3517cb --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java @@ -0,0 +1,46 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.dao; + +import java.util.Collection; + +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +public interface BoLChunkDAO { + + void addChild(BoLChunkDataTO to); + + void addNew(BoLChunkDataTO to, String clientDn); + + void update(BoLChunkDataTO to); + + void updateIncomplete(ReducedBoLChunkDataTO to); + + Collection find(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls); + + Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + int updateStatus(BoLChunkDataTO to, TStatusCode status, String explanation); + + int releaseExpiredAndSuccessfulRequests(); + + int abortInProgressRequestsSince(long expirationTimeInSeconds); + + void updateStatusOnMatchingStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray); +} diff --git a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java index 252a26797..dd8d9f89e 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java @@ -5,19 +5,50 @@ package 
it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtGChunkTO; + +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; public interface PtGChunkDAO { - public PtGChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void addChild(PtGChunkDataTO to); + + public void addNew(PtGChunkDataTO to, String clientDn); + + public void update(PtGChunkDataTO to); + + public void updateIncomplete(ReducedPtGChunkDataTO chunkTO); + + public PtGChunkDataTO refresh(long primaryKey); + + public Collection find(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray); + + public Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + public void fail(PtGChunkDataTO auxTO); + + public int numberInSRM_FILE_PINNED(int surlUniqueID); + + public int count(int surlUniqueID, TStatusCode status); + + public Collection transitExpiredSRM_FILE_PINNED(); + + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids); - public void addPtGChunkData(PtGChunkTO ptgChunkTO) throws DataAccessException; + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token); - public Collection getPtGChunksDataByToken(TRequestToken token) - throws DataAccessException; + public void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation); - public void removePtGChunksData(PtGChunkTO ptgChunkTO) - throws DataAccessException; + public void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, 
TStatusCode newStatusCode, String explanation); } diff --git a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java index 3db4fa5d9..18e75b0ce 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java @@ -5,20 +5,43 @@ package it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtPChunkTO; +import java.util.Map; + +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; public interface PtPChunkDAO { - public PtPChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void update(PtPChunkDataTO to); + + public void updateIncomplete(ReducedPtPChunkDataTO chunkTO); + + public Collection find(TRequestToken requestToken); + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + public int fail(PtPChunkDataTO auxTO); + + public Map getExpiredSRM_SPACE_AVAILABLE(); + + public Map getExpired(TStatusCode status); + + public int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids); + + public int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, + String explanation); + + public int updateStatus(Collection ids, TStatusCode fromStatus, TStatusCode toStatus, + String explanation); - public void addPtGChunkData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation); - public Collection getPtPChunksDataByToken(TRequestToken token) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken 
requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); - public void removePtGChunksData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode); } diff --git a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java index c0c3f8b75..a0438c165 100644 --- a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java @@ -4,17 +4,43 @@ */ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.RequestSummaryTO; +import java.util.Collection; + +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; public interface RequestSummaryDAO { - public RequestSummaryTO getRequestSummaryById(Long ssId) - throws DataAccessException; + Collection fetchNewRequests(int limit); + + void failRequest(long requestId, String explanation); + + void failPtGRequest(long requestId, String explanation); + + void failPtPRequest(long requestId, String explanation); + + void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, String explanation); + + void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); + + void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation); + + void abortRequest(TRequestToken requestToken); + + void abortInProgressRequest(TRequestToken requestToken); + + void abortChunksOfInProgressRequest(TRequestToken 
requestToken, Collection surls); + + TRequestType getRequestType(TRequestToken requestToken); + + RequestSummaryDataTO find(TRequestToken requestToken); + + Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize); - public void addRequestSummary(RequestSummaryTO rsd) - throws DataAccessException; + int getNumberExpired(); - public void removeRequestSummary(RequestSummaryTO rsd) - throws DataAccessException; } diff --git a/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java new file mode 100644 index 000000000..942ef0c7a --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.dao; + +import java.util.List; +import java.util.Map; + +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +public interface SURLStatusDAO { + + boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation); + + boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation); + + Map getPinnedSURLsForUser(GridUserInterface user, List surls); + + Map getPinnedSURLsForUser(GridUserInterface user, TRequestToken token, + List surls); + + Map getSURLStatuses(TRequestToken token); + + Map getSURLStatuses(TRequestToken token, List surls); + + int markSURLsReadyForRead(TRequestToken token, List surls); + + void releaseSURL(TSURL surl); + + void releaseSURLs(GridUserInterface user, List surls); + + void releaseSURLs(List surls); + + void releaseSURLs(TRequestToken token, List surls); + + boolean surlHasOngoingPtGs(TSURL surl); + + boolean surlHasOngoingPtPs(TSURL surl, TRequestToken token); + +} diff --git 
a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java b/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java deleted file mode 100644 index b244c951e..000000000 --- a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java +++ /dev/null @@ -1,8 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.dao; - -public interface StorageAreaDAO { -} diff --git a/src/main/java/it/grid/storm/persistence/dao/StorageSpaceDAO.java b/src/main/java/it/grid/storm/persistence/dao/StorageSpaceDAO.java index c8a727b16..35abd04e7 100644 --- a/src/main/java/it/grid/storm/persistence/dao/StorageSpaceDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/StorageSpaceDAO.java @@ -16,59 +16,48 @@ * * Storage Space Data Access Object (DAO) * - * DAO pattern - * - * */ public interface StorageSpaceDAO { - public StorageSpaceTO getStorageSpaceById(Long ssId) - throws DataAccessException; + public StorageSpaceTO getStorageSpaceById(Long ssId) throws DataAccessException; - public Collection getStorageSpaceByOwner( - GridUserInterface owner, String spaceAlias) throws DataAccessException; + public Collection getStorageSpaceByOwner(GridUserInterface owner, + String spaceAlias) throws DataAccessException; + + public Collection getStorageSpaceBySpaceType(String stype) + throws DataAccessException; - public Collection getStorageSpaceBySpaceType(String stype) - throws DataAccessException; + public Collection getStorageSpaceByAliasOnly(String spaceAlias) + throws DataAccessException; - public Collection getStorageSpaceByAliasOnly(String spaceAlias) - throws DataAccessException; + public StorageSpaceTO getStorageSpaceByToken(String token) throws DataAccessException; - public StorageSpaceTO getStorageSpaceByToken(String token) - throws DataAccessException; + public Collection findAll() throws DataAccessException; - public Collection findAll() throws 
DataAccessException; + public void addStorageSpace(StorageSpaceTO ss) throws DataAccessException; - public void addStorageSpace(StorageSpaceTO ss) throws DataAccessException; + public void removeStorageSpace(GridUserInterface user, String spaceToken) + throws DataAccessException; - public void removeStorageSpace(GridUserInterface user, String spaceToken) - throws DataAccessException; + public void removeStorageSpace(String spaceToken) throws DataAccessException; - public void removeStorageSpace(String spaceToken) throws DataAccessException; + public void updateStorageSpace(StorageSpaceTO ss) throws DataAccessException; - public void updateStorageSpace(StorageSpaceTO ss) throws DataAccessException; + public void updateStorageSpaceFreeSpace(StorageSpaceTO ss) throws DataAccessException; - public void updateStorageSpaceFreeSpace(StorageSpaceTO ss) - throws DataAccessException; + public void updateAllStorageSpace(StorageSpaceTO ss) throws DataAccessException; - public void updateAllStorageSpace(StorageSpaceTO ss) - throws DataAccessException; + public Collection getExpired(long currentTimeInSecond) throws DataAccessException; - public Collection getExpired(long currentTimeInSecond) - throws DataAccessException; + public Collection getStorageSpaceByUnavailableUsedSpace(long unavailableSizeValue) + throws DataAccessException; - public Collection getStorageSpaceByUnavailableUsedSpace( - long unavailableSizeValue) throws DataAccessException; + public Collection getStorageSpaceByPreviousLastUpdate(Date lastUpdateTimestamp) + throws DataAccessException; - public Collection getStorageSpaceByPreviousLastUpdate( - Date lastUpdateTimestamp) throws DataAccessException; - - public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) - throws DataAccessException; + public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) throws DataAccessException; - public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) + public int decreaseUsedSpace(String 
spaceToken, long usedSpaceToRemove) throws DataAccessException; } diff --git a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java index 75f1d23d3..970734505 100644 --- a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java @@ -4,186 +4,176 @@ */ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; - import java.util.Date; import java.util.List; +import java.util.Optional; import java.util.UUID; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; + /** * Tape Recall Data Access Object (DAO) */ -public abstract class TapeRecallDAO extends AbstractDAO { - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress(String voName) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued(String voName) throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getReadyForTakeOver() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getReadyForTakeOver(String voName) - throws DataAccessException; - - /** - * @param taskId - * @param requestToken - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param 
groupTaskId - * @return - * @throws DataAccessException - */ - public abstract List getGroupTasks(UUID groupTaskId) - throws DataAccessException; - - /** - * Verifies that a recall task with the given taskId and request token exists - * on the database - * - * @param taskId - * @param requestToken - * @return true if the recall task exists - * @throws DataAccessException - */ - public abstract boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param groupTaskId - * @return - * @throws DataAccessException - */ - public abstract boolean existsGroupTask(UUID groupTaskId) - throws DataAccessException; - - /** - * Method called by a garbage collector that removes all tape recalls that are - * not in QUEUED (1) or IN_PROGRESS (2) status - * - * @param expirationTime seconds must pass to consider the request as expired - * @param delete at most numMaxToPurge tasks - * @return the amount of tasks deleted - * @throws DataAccessException - */ - public abstract int purgeCompletedTasks(long expirationTime, int numMaxToPurge) - throws DataAccessException; - - /** - * @param taskId - * @param newValue - * @throws DataAccessException - */ - public abstract void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask(String voName) - throws DataAccessException; - - /** - * Performs the take-over of max numberOfTaks tasks possibly returning more - * than one file recall task for some files - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException; - - /** - * - * @param numberOfTaks - * @param voName - * @return - * @throws DataAccessException - 
*/ - public abstract List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException; - - /** - * @param task - * @param statuses - * @param proposedGroupTaskId - * @return - * @throws DataAccessException - */ - public abstract UUID insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException; - - /** - * @param groupTaskId - * @param statusId - * @return - * @throws DataAccessException - */ - public abstract boolean setGroupTaskStatus(UUID groupTaskId, int statusId, - Date timestamp) throws DataAccessException; - - /** - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException; - -} \ No newline at end of file +public interface TapeRecallDAO { + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberInProgress() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberInProgress(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberQueued() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberQueued(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver(String voName) throws DataAccessException; + + /** + * @param taskId + * @param requestToken + * @return + * @throws DataAccessException + */ + public Optional getTask(UUID taskId, String requestToken) + throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public List getGroupTasks(UUID groupTaskId) throws 
DataAccessException; + + /** + * Verifies that a recall task with the given taskId and request token exists on the database + * + * @param taskId + * @param requestToken + * @return true if the recall task exists + * @throws DataAccessException + */ + public boolean existsTask(UUID taskId, String requestToken) throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException; + + /** + * Method called by a garbage collector that removes all tape recalls that are not in QUEUED (1) + * or IN_PROGRESS (2) status + * + * @param expirationTime seconds must pass to consider the request as expired + * @param delete at most numMaxToPurge tasks + * @return the amount of tasks deleted + * @throws DataAccessException + */ + public int purgeCompletedTasks(long expirationTime, int numMaxToPurge) throws DataAccessException; + + /** + * @param taskId + * @param newValue + * @throws DataAccessException + */ + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask(String voName) throws DataAccessException; + + /** + * Performs the take-over of max numberOfTaks tasks possibly returning more than one file recall + * task for some files + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException; + + /** + * + * @param numberOfTaks + * @param voName + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException; + + /** + * @param task + * @param statuses + * @param proposedGroupTaskId + * @return + * @throws 
DataAccessException + */ + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException; + + /** + * @param groupTaskId + * @param statusId + * @return + * @throws DataAccessException + */ + public boolean setGroupTaskStatus(UUID groupTaskId, int statusId, Date timestamp) + throws DataAccessException; + + /** + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException; + +} diff --git a/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java new file mode 100644 index 000000000..9980bf98e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java @@ -0,0 +1,40 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.dao; + +import java.util.List; + +/** + * DAO class for VolatileAndJiTCatalog: it has been specifically designed for MySQL. 
+ * + */ + +public interface VolatileAndJiTDAO { + + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime); + + public void addVolatile(String filename, long start, long fileLifetime); + + public boolean exists(String filename); + + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public int numberJiT(String filename, int uid, int acl); + + public int numberVolatile(String filename); + + public void removeAllJiTsOn(String filename); + + public List removeExpired(long time); + + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public void updateVolatile(String filename, long start, long fileLifetime); + + public void updateVolatile(String fileName, long fileStart); + + public List volatileInfoOn(String filename); + +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java b/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java deleted file mode 100644 index 1969ad243..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark (fatal) failures in infrastructure and system - * code. 
- * - * @author Christian Bauer - */ -public class InfrastructureException extends RuntimeException { - - public InfrastructureException() { - - } - - public InfrastructureException(String message) { - - super(message); - } - - public InfrastructureException(String message, Throwable cause) { - - super(message, cause); - } - - public InfrastructureException(Throwable cause) { - - super(cause); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java index 408d6e23f..791249c5d 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.srm.types.TDirOption; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java index a552df2fd..e8f900428 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java index 0acc4dfb7..677d0d36e 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java index 232f2e61d..7385eaf85 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TLifeTimeInSeconds; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java index 4b37da945..1d782eed3 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java similarity index 99% rename from src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java index 0edbc8ce6..d93d76e6c 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.srm.types.TFileStorageType; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java index 2d37dda1a..992be1919 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java index dc3725ab2..9f1e11943 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java index 6c96fe12f..5964321b3 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java index 638f7661c..dbfc306d4 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java index 4608e2bf5..f55d10c11 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TRequestType; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java index 8c3fc0c13..22a943776 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.srm.types.TSpaceToken; diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java index b6d5a217f..b705bd6b3 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java similarity index 92% rename from src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java rename to src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java index 2fecdce61..a71da2904 100644 --- a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; /** * This class represents an Exception thrown when the RequestSummaryCatalog cannot create a diff --git a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java b/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java deleted file mode 100644 index 6c9de5a39..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark generic failures in persistence layer - * - */ - -public class PersistenceException extends Exception { - - public PersistenceException() { - - super(); - } - - public PersistenceException(String message) { - - super(message); - } - - public PersistenceException(String message, Throwable cause) { - - super(message, cause); - } - - public PersistenceException(Throwable cause) { - - super(cause); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java new file mode 100644 index 000000000..3a51b7ade --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java @@ -0,0 +1,984 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import 
it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. + * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLChunkDAOMySql extends AbstractDAO implements BoLChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(BoLChunkDAOMySql.class); + + private static final String SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN = + "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS = + "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? 
AND sb.statusCode<>?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + "WHERE rq.r_token=?"; + + private static final String INSERT_INTO_REQUEST_QUEUE = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) " + + "VALUES (?,?,?,?,?,?,?,?,?)"; + + private static final String INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; + + private static final String INSERT_INTO_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; + + private static final String INSERT_INTO_REQUEST_BOL = + "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + private static final String UPDATE_REQUEST_BOL_WHERE_ID = + "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String INSERT_INTO_STATUS_BOL = + "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_QUEUE_WHERE_ID = + "UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID) " + + "SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=? 
" + + "WHERE rb.ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN = "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_WHERE_ID = + "UPDATE status_BoL SET statusCode=?, explanation=? WHERE request_BoLID=?"; + + private static final String UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS = + "UPDATE status_BoL sb " + + "JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "SET sb.statusCode=? " + + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static final String ABORT_EXPIRED_BOL_REQUESTS_INPROGRESS = + "UPDATE status_BoL sb " + + "JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "SET sb.statusCode=? " + + "WHERE sb.statusCode=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND)"; + + private static BoLChunkDAOMySql instance; + + public static synchronized BoLChunkDAO getInstance() { + if (instance == null) { + instance = new BoLChunkDAOMySql(); + } + return instance; + } + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + + private BoLChunkDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The supplied BoLChunkData is used to fill in only the DB + * table where file specific info gets recorded: it does _not_ add a new request! So if spurious + * data is supplied, it will just stay there because of a lack of a parent request! 
+ */ + public synchronized void addChild(BoLChunkDataTO to) { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getManagedConnection(); + + /* + * WARNING!!!! We are forced to run a query to get the ID of the request, which should NOT be + * so because the corresponding request object should have been changed with the extra field! + * However, it is not possible at the moment to perform such change because of strict deadline + * and the change could wreak havoc the code. So we are forced to make this query!!! + */ + + ps = con.prepareStatement(SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN); + ps.setString(1, to.getRequestToken()); + log.debug("BoL CHUNK DAO: addChild; {}", ps); + res = ps.executeQuery(); + + /* ID of request in request_process! */ + int requestId = extractID(res); + int id = fillBoLTables(con, to, requestId); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + } catch (Exception e) { + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. The + * supplied BoLChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! + */ + public synchronized void addNew(BoLChunkDataTO to, String client_dn) { + + final String DESCRIPTION = "New BoL Request resulting from srmCopy invocation."; + + /* Result set containing the ID of the inserted new request */ + ResultSet rs = null; + PreparedStatement addReqQ = null; + PreparedStatement addReqTP = null; + Connection con = null; + + try { + // begin transaction + + con = getManagedConnection(); + + // add to request_queue... 
+ addReqQ = con.prepareStatement(INSERT_INTO_REQUEST_QUEUE, RETURN_GENERATED_KEYS); + /* request type set to bring online */ + addReqQ.setString(1, requestTypeConverter.toDB(BRING_ON_LINE)); + addReqQ.setString(2, client_dn); + addReqQ.setInt(3, to.getLifeTime()); + addReqQ.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addReqQ.setString(5, DESCRIPTION); + addReqQ.setString(6, to.getRequestToken()); + addReqQ.setInt(7, 1); // number of requested files set to 1! + addReqQ.setTimestamp(8, new Timestamp(new Date().getTime())); + addReqQ.setInt(9, to.getDeferredStartTime()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqQ); + addReqQ.execute(); + + rs = addReqQ.getGeneratedKeys(); + int id_new = extractID(rs); + + addReqTP = con.prepareStatement(INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS); + for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { + addReqTP.setInt(1, id_new); + addReqTP.setString(2, i.next()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqTP); + addReqTP.execute(); + } + + // addChild... + int id_s = fillBoLTables(con, to, id_new); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id_s); + } catch (Exception e) { + log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " + + "exception received: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(addReqQ); + closeStatement(addReqTP); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillBoLTables(Connection con, BoLChunkDataTO to, int requestQueueID) + throws SQLException, Exception { + + /* Result set containing the ID of the inserted */ + ResultSet rs_do = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_b = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_s = null; + /* insert TDirOption for request */ + PreparedStatement addDirOption = null; + /* insert request_Bol for request */ + PreparedStatement addBoL = null; + PreparedStatement addChild = null; + + try { + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_INTO_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.getDirOption()); + addDirOption.setBoolean(2, to.getAllLevelRecursive()); + addDirOption.setInt(3, to.getNumLevel()); + log.trace("BoL CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + rs_do = addDirOption.getGeneratedKeys(); + int id_do = extractID(rs_do); + + // second fill in request_BoL... sourceSURL and TDirOption! + addBoL = con.prepareStatement(INSERT_INTO_REQUEST_BOL, RETURN_GENERATED_KEYS); + addBoL.setInt(1, id_do); + addBoL.setInt(2, requestQueueID); + addBoL.setString(3, to.getFromSURL()); + addBoL.setString(4, to.normalizedStFN()); + addBoL.setInt(5, to.sulrUniqueID()); + log.trace("BoL CHUNK DAO: addNew; {}", addBoL); + addBoL.execute(); + + rs_b = addBoL.getGeneratedKeys(); + int id_g = extractID(rs_b); + + // third fill in status_BoL... 
+ addChild = con.prepareStatement(INSERT_INTO_STATUS_BOL, RETURN_GENERATED_KEYS); + addChild.setInt(1, id_g); + addChild.setInt(2, to.getStatus()); + addChild.setString(3, to.getErrString()); + log.trace("BoL CHUNK DAO: addNew; " + addChild); + addChild.execute(); + + return id_g; + } finally { + closeResultSet(rs_do); + closeResultSet(rs_b); + closeResultSet(rs_s); + closeStatement(addDirOption); + closeStatement(addBoL); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved BoLChunkDataTO, back into the MySQL DB. + * Only the fileSize, statusCode and explanation, of status_BoL table are written to the DB. + * Likewise for the request pinLifetime. In case of any error, an error message gets logged but no + * exception is thrown. + */ + public synchronized void update(BoLChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + try { + con = getConnection(); + // ready updateFileReq... + updateFileReq = con.prepareStatement(UPDATE_REQUEST_QUEUE_WHERE_ID); + updateFileReq.setLong(1, to.getFileSize()); + updateFileReq.setInt(2, to.getStatus()); + updateFileReq.setString(3, to.getErrString()); + updateFileReq.setInt(4, to.getLifeTime()); + updateFileReq.setString(5, to.normalizedStFN()); + updateFileReq.setInt(6, to.sulrUniqueID()); + updateFileReq.setLong(7, to.getPrimaryKey()); + // execute update + log.trace("BoL CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Bol represented by the received ReducedBoLChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_BOL_WHERE_ID); + ps.setString(1, chunkTO.normalizedStFN()); + ps.setInt(2, chunkTO.surlUniqueID()); + ps.setLong(3, chunkTO.primaryKey()); + log.trace("BoL CHUNK DAO - update incomplete: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding BoLChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_BoL, status_BoL and request_DirOption. The considered fields are: (1) From status_BoL: + * the ID field which becomes the TOs primary key, and statusCode. (2) From request_BoL: + * sourceSURL (3) From request_queue: pinLifetime (4) From request_DirOption: isSourceADirectory, + * alLevelRecursive, numOfLevels In case of any error, a log gets written and an empty collection + * is returned. No exception is thrown. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement pps = null; + PreparedStatement rps = null; + ResultSet prs = null; + ResultSet rrs = null; + + try { + + con = getConnection(); + pps = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + pps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO: find method; {}", pps); + prs = pps.executeQuery(); + + while (prs.next()) { + protocols.add(prs.getString("tp.config_ProtocolsID")); + } + + rps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS); + List results = Lists.newArrayList(); + rps.setString(1, requestToken.getValue()); + rps.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("BoL CHUNK DAO: find method; {}", rps); + rrs = rps.executeQuery(); + + while (rrs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rrs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rrs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rrs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setTimeStamp(rrs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rrs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rrs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rrs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rrs.getInt("rb.sourceSURL_uniqueID"); + if (!rrs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setDirOption(rrs.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rrs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rrs.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return 
Lists.newArrayList(); + + } finally { + closeResultSet(prs); + closeResultSet(rrs); + closeStatement(pps); + closeStatement(rps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + List results = Lists.newArrayList(); + + try { + + con = getConnection(); + + ps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN); + ps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO! findReduced with request token; {}", ps); + rs = ps.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return results; + + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.client_dn=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedBoLChunkDataTO chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized int updateStatus(BoLChunkDataTO to, TStatusCode status, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + int result = 0; + + try { + con = getConnection(); + ps = 
con.prepareStatement(UPDATE_STATUS_WHERE_ID); + ps.setInt(1, statusCodeConverter.toDB(status)); + ps.setString(2, explanation); + ps.setLong(3, to.getPrimaryKey()); + log.trace("BoL CHUNK DAO: update status {}", ps); + result = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * Method that updates to SRM_RELEASED all the requests in SRM_SUCCESS status which have the + * requested pin lifetime expired. This is necessary when the client forgets to invoke + * srmReleaseFiles(). + * + * @return List of updated SURLs. + */ + public synchronized int releaseExpiredAndSuccessfulRequests() { + + Connection con = null; + PreparedStatement ps = null; + + int count = 0; + + try { + + // start transaction + con = getConnection(); + + /* Update status of all successful expired requests to SRM_RELEASED */ + ps = con.prepareStatement(UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS); + ps.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + ps.setInt(2, statusCodeConverter.toDB(SRM_SUCCESS)); + log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", ps); + + count = ps.executeUpdate(); + + if (count == 0) { + log.trace( + "BoLChunkDAO! No chunk of BoL request was transited from SRM_SUCCESS to SRM_RELEASED."); + } else { + log.info( + "BoLChunkDAO! {} chunks of BoL requests were transited from SRM_SUCCESS to SRM_RELEASED.", + count); + } + + } catch (SQLException e) { + + log.error("BoLChunkDAO! 
SQLException.", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeStatement(ps); + closeConnection(con); + } + return count; + } + + public int abortInProgressRequestsSince(long expirationTimeInSeconds) { + + Connection con = null; + PreparedStatement ps = null; + + int count = 0; + + try { + + // start transaction + con = getConnection(); + + ps = con.prepareStatement(ABORT_EXPIRED_BOL_REQUESTS_INPROGRESS); + ps.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + ps.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + ps.setLong(3, expirationTimeInSeconds); + log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", ps); + + count = ps.executeUpdate(); + + if (count == 0) { + log.trace( + "BoLChunkDAO! No chunk of BoL request was transited from SRM_SUCCESS to SRM_RELEASED."); + } else { + log.info( + "BoLChunkDAO! {} chunks of BoL requests were transited from SRM_SUCCESS to SRM_RELEASED.", + count); + } + + } catch (SQLException e) { + + log.error("BoLChunkDAO! 
SQLException.", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeStatement(ps); + closeConnection(con); + } + return count; + } + + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " + + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + "SET sb.statusCode=? "; + if (withExplanation) { + str += " , " + buildExplanationSet(explanation); + } + str += " WHERE sb.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("BOL CHUNK DAO! Unable to updated from {} to {}!", expectedStatusCode, + newStatusCode, e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0) { + throw new IllegalArgumentException("Unable to perform the find, " + + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surlsArray=" + surlsArray); + } + return find(surlsUniqueIDs, surlsArray, null, false); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length 
== 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + // get chunks of the request + String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " + + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " + + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + find = con.prepareStatement(str); + + log.trace("BOL CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); + 
chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); + + results.add(chunkDataTO); + } + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + + } + + return results; + } + + /** + * Private method that returns the generated ID: it throws an exception in case of any problem! + */ + private int extractID(ResultSet rs) throws Exception { + + if (rs == null) { + throw new Exception("BoL CHUNK DAO! Null ResultSet!"); + } + if (rs.next()) { + return rs.getInt(1); + } + String msg = + "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"; + log.error(msg); + throw new Exception(msg); + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExplanationSet(String explanation) { + + return " sb.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java new file mode 100644 index 000000000..1c4fbac5c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java @@ -0,0 +1,1250 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_PINNED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import it.grid.storm.ea.StormEA; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for 
PtGChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. + * + * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the object model. + * + * @author EGRID ICTP + * @version 3.0 + * @date June 2005 + */ +public class PtGChunkDAOMySql extends AbstractDAO implements PtGChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtGChunkDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_TOKEN = + "SELECT * FROM request_queue WHERE r_token=?"; + + private static final String INSERT_REQUEST = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) " + + "VALUES (?,?,?,?,?,?,?,?)"; + + private static final String INSERT_REQUEST_TRASNFER_PROTOCOL = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) " + + "VALUES (?,?)"; + + private static final String INSERT_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) " + + "VALUES (?,?,?)"; + + private static final String INSERT_REQUEST_GET = + "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + private static final String INSERT_STATUS_GET = + "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID = + "UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " + + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? 
" + + "WHERE rg.ID=?"; + + private static final String UPDATE_REQUEST_GET_WHERE_ID = + "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " + + "WHERE rg.ID=?"; + + private static final String SELECT_STATUS_GET_WHERE_GET_ID = + "SELECT statusCode, transferURL FROM status_Get WHERE request_GetID=?"; + + private static final String SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS = + "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " + + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " + + "d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? AND sg.statusCode<>?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS = + "UPDATE status_Get SET statusCode=?, explanation=? WHERE request_GetID=?"; + + private static final String COUNT_REQUEST_ON_SURL_WITH_STATUS = + "SELECT COUNT(rg.ID) FROM status_Get sg JOIN request_Get rg " + + "ON (sg.request_GetID=rg.ID) WHERE rg.sourceSURL_uniqueID=? 
AND sg.statusCode=?"; + + private static final String SELECT_EXPIRED_REQUESTS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " + + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime"; + + private static final String UPDATE_STATUS_OF_EXPIRED_REQUESTS = + "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + + "WHERE sg.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static final String SELECT_PTG_PINNED_SURLS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " + + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static final String SELECT_BOL_PINNED_SURLS = + "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " + + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "WHERE sb.statusCode=?" 
+ + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static PtGChunkDAOMySql instance; + + public static synchronized PtGChunkDAO getInstance() { + if (instance == null) { + instance = new PtGChunkDAOMySql(); + } + return instance; + } + + private final RequestTypeConverter requestTypeConverter; + private final StatusCodeConverter statusCodeConverter; + + private PtGChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + requestTypeConverter = RequestTypeConverter.getInstance(); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. + * + * The supplied PtGChunkData is used to fill in only the DB table where file specific info gets + * recorded: it does _not_ add a new request! So if spurious data is supplied, it will just stay + * there because of a lack of a parent request! + */ + public synchronized void addChild(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement id = null; + ResultSet rsid = null; + + try { + + // WARNING!!!! We are forced to run a query to get the ID of the request, + // which should NOT be so + // because the corresponding request object should have been changed with + // the extra field! However, it is not possible + // at the moment to perform such chage because of strict deadline and the + // change could wreak havoc + // the code. So we are forced to make this query!!! 
+ + con = getManagedConnection(); + id = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN); + id.setString(1, to.requestToken()); + log.debug("PTG CHUNK DAO: addChild; {}", id); + rsid = id.executeQuery(); + + if (rsid.next()) { + + int requestId = rsid.getInt("ID"); + int id_s = fillPtGTables(con, to, requestId); + con.commit(); + to.setPrimaryKey(id_s); + + } else { + log.error("Unable to find queued request for token {}", to.requestToken()); + con.rollback(); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: unable to complete addChild! " + "PtGChunkDataTO: {}; error: {}", + to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsid); + closeStatement(id); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. + * + * The supplied PtGChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! + */ + public synchronized void addNew(PtGChunkDataTO to, String clientDn) { + + Connection con = null; + ResultSet rsNew = null; + PreparedStatement addNew = null; + PreparedStatement addProtocols = null; + + try { + + con = getManagedConnection(); + + addNew = con.prepareStatement(INSERT_REQUEST, RETURN_GENERATED_KEYS); + addNew.setString(1, requestTypeConverter.toDB(PREPARE_TO_GET)); + addNew.setString(2, clientDn); + addNew.setInt(3, to.lifeTime()); + addNew.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); + addNew.setString(6, to.requestToken()); + addNew.setInt(7, 1); // number of requested files set to 1! 
+ addNew.setTimestamp(8, new Timestamp(new Date().getTime())); + log.trace("PTG CHUNK DAO: addNew; {}", addNew); + addNew.execute(); + + rsNew = addNew.getGeneratedKeys(); + + if (!rsNew.next()) { + log.error("Unable to insert new request"); + con.rollback(); + return; + } + int idNew = rsNew.getInt(1); + + // add protocols... + addProtocols = con.prepareStatement(INSERT_REQUEST_TRASNFER_PROTOCOL); + for (Iterator i = to.protocolList().iterator(); i.hasNext();) { + addProtocols.setInt(1, idNew); + addProtocols.setString(2, i.next()); + log.trace("PTG CHUNK DAO: addNew; {}", addProtocols); + addProtocols.execute(); + } + + // addChild... + int id = fillPtGTables(con, to, idNew); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! " + + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsNew); + closeStatement(addNew); + closeStatement(addProtocols); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillPtGTables(Connection con, PtGChunkDataTO to, int requestQueueID) + throws SQLException { + + ResultSet rsDo = null; + ResultSet rsG = null; + ResultSet rsS = null; + PreparedStatement addDirOption = null; + PreparedStatement addGet = null; + PreparedStatement addChild = null; + + try { + + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.dirOption()); + addDirOption.setBoolean(2, to.allLevelRecursive()); + addDirOption.setInt(3, to.numLevel()); + log.trace("PTG CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + 
rsDo = addDirOption.getGeneratedKeys(); + + if (!rsDo.next()) { + throw new SQLException("Unable to get dir_option id"); + } + int idDo = rsDo.getInt(1); + + // second fill in request_Get... sourceSURL and TDirOption! + addGet = con.prepareStatement(INSERT_REQUEST_GET, RETURN_GENERATED_KEYS); + addGet.setInt(1, idDo); + addGet.setInt(2, requestQueueID); + addGet.setString(3, to.fromSURL()); + addGet.setString(4, to.normalizedStFN()); + addGet.setInt(5, to.surlUniqueID()); + log.trace("PTG CHUNK DAO: addNew; {}", addGet); + addGet.execute(); + + rsG = addGet.getGeneratedKeys(); + if (!rsG.next()) { + throw new SQLException("Unable to get request_get id"); + } + int idG = rsG.getInt(1); + + // third fill in status_Get... + addChild = con.prepareStatement(INSERT_STATUS_GET, RETURN_GENERATED_KEYS); + addChild.setInt(1, idG); + addChild.setInt(2, to.status()); + addChild.setString(3, to.errString()); + log.trace("PTG CHUNK DAO: addNew; {}", addChild); + addChild.execute(); + + return idG; + + } finally { + closeResultSet(rsDo); + closeResultSet(rsG); + closeResultSet(rsS); + closeStatement(addDirOption); + closeStatement(addGet); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved PtGChunkDataTO, back into the MySQL DB. + * + * Only the fileSize, transferURL, statusCode and explanation, of status_Get table are written to + * the DB. Likewise for the request pinLifetime. + * + * In case of any error, an error message gets logged but no exception is thrown. 
+ */ + public synchronized void update(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + + try { + + con = getConnection(); + updateFileReq = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID); + updateFileReq.setLong(1, to.fileSize()); + updateFileReq.setString(2, to.turl()); + updateFileReq.setInt(3, to.status()); + updateFileReq.setString(4, to.errString()); + updateFileReq.setInt(5, to.lifeTime()); + updateFileReq.setString(6, to.normalizedStFN()); + updateFileReq.setInt(7, to.surlUniqueID()); + updateFileReq.setLong(8, to.primaryKey()); + // execute update + log.trace("PTG CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Get represented by the received ReducedPtGChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_GET_WHERE_ID); + update.setString(1, chunkTO.normalizedStFN()); + update.setInt(2, chunkTO.surlUniqueID()); + update.setLong(3, chunkTO.primaryKey()); + log.trace("PtG CHUNK DAO - update incomplete: {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * TODO WARNING! THIS IS A WORK IN PROGRESS!!! + * + * Method used to refresh the PtGChunkDataTO information from the MySQL DB. + * + * In this first version, only the statusCode and the TURL are reloaded from the DB. 
TODO The next + * version must contains all the information related to the Chunk! + * + * In case of any error, an error messagge gets logged but no exception is thrown. + */ + + public synchronized PtGChunkDataTO refresh(long primaryKey) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + PtGChunkDataTO chunkDataTO = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_STATUS_GET_WHERE_GET_ID); + find.setLong(1, primaryKey); + log.trace("PTG CHUNK DAO: refresh status method; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setTurl(rs.getString("sg.transferURL")); + } + return chunkDataTO; + + } catch (SQLException e) { + + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return null; + + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtGChunkDataTO objects. + * + * An initial simple query establishes the list of protocols associated with the request. A second + * complex query establishes all chunks associated with the request, by properly joining + * request_queue, request_Get, status_Get and request_DirOption. The considered fields are: + * + * (1) From status_Get: the ID field which becomes the TOs primary key, and statusCode. + * + * (2) From request_Get: sourceSURL + * + * (3) From request_queue: pinLifetime + * + * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, numOfLevels + * + * In case of any error, a log gets written and an empty collection is returned. No exception is + * thrown. + * + * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + findProtocols.setString(1, requestToken.getValue()); + log.trace("PTG CHUNK DAO: find method; {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + findRequest = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PTG CHUNK DAO: find method; {}", findRequest); + rsRequest = findRequest.executeQuery(); + + PtGChunkDataTO chunkDataTO; + while (rsRequest.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rsRequest.getInt("sg.statusCode")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setPrimaryKey(rsRequest.getLong("rg.ID")); + chunkDataTO.setFromSURL(rsRequest.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rsRequest.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rsRequest.getInt("rg.sourceSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separeted by the "#" char. The proxy is a BLOB, hence it has + * to be properly conveted in string. 
+ */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setLifeTime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setDirOption(rsRequest.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rsRequest.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rsRequest.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTG CHUNK DAO: ", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO reducedChunkDataTO = new ReducedPtGChunkDataTO(); + reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); + reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + reducedChunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + reducedChunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(reducedChunkDataTO); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surlsArray) + " ) "; + + con = getConnection(); + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. + */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_get that have + * not the uniqueID set because are not yet been used by anybody + */ + con = getConnection(); + // get reduced chunks + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.client_dn=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. + * + * This method attempts to change the status of the request to SRM_FAILURE and record it in the + * DB. + * + * This operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messagges where recorded. + * + * Yet it soon became clear that the source of malformed data were the clients and/or FE recording + * info in the DB. In these circumstances the client would see its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. 
+ */ + public synchronized void fail(PtGChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + + con = getConnection(); + update = con.prepareStatement(UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + update.setString(2, "Request is malformed!"); + update.setLong(3, auxTO.primaryKey()); + log.trace("PTG CHUNK DAO: signalMalformed; {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("PtGChunkDAO! Unable to signal in DB that the request was " + + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method that returns the number of Get requests on the given SURL, that are in SRM_FILE_PINNED + * state. + * + * This method is intended to be used by PtGChunkCatalog in the isSRM_FILE_PINNED method + * invocation. + * + * In case of any error, 0 is returned. + */ + // request_Get table + public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { + + return count(surlUniqueID, SRM_FILE_PINNED); + } + + public synchronized int count(int surlUniqueID, TStatusCode status) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + int count = 0; + + try { + con = getConnection(); + find = con.prepareStatement(COUNT_REQUEST_ON_SURL_WITH_STATUS); + find.setInt(1, surlUniqueID); + find.setInt(2, statusCodeConverter.toDB(status)); + log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find); + rs = find.executeQuery(); + + if (rs.next()) { + count = rs.getInt(1); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " + "Returning 0! 
{}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return count; + } + + /** + * Method that updates all expired requests in SRM_FILE_PINNED state, into SRM_RELEASED. + * + * This is needed when the client forgets to invoke srmReleaseFiles(). + * + * @return + */ + public synchronized Collection transitExpiredSRM_FILE_PINNED() { + + Map expiredSurlMap = Maps.newHashMap(); + Set pinnedSurlSet = Sets.newHashSet(); + + Connection con = null; + PreparedStatement findExpired = null; + PreparedStatement updateExpired = null; + PreparedStatement findPtgPinnedSurls = null; + PreparedStatement findBolPinnedSurls = null; + ResultSet expired = null; + ResultSet ptgPinnedSurls = null; + ResultSet bolPinnedSurls = null; + + /* Find all expired SURLs */ + try { + // start transaction + con = getManagedConnection(); + + findExpired = con.prepareStatement(SELECT_EXPIRED_REQUESTS); + findExpired.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + expired = findExpired.executeQuery(); + + while (expired.next()) { + String sourceSURL = expired.getString("rg.sourceSURL"); + Integer uniqueID = Integer.valueOf(expired.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not set compute it */ + if (expired.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}: " + + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); + } + } + expiredSurlMap.put(sourceSURL, uniqueID); + } + + if (expiredSurlMap.isEmpty()) { + con.commit(); + log.trace( + "PtGChunkDAO! 
No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); + return Lists.newArrayList(); + } + + updateExpired = con.prepareStatement(UPDATE_STATUS_OF_EXPIRED_REQUESTS); + updateExpired.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + updateExpired.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", updateExpired); + int count = updateExpired.executeUpdate(); + + if (count == 0) { + log.trace("PtGChunkDAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" + + " SRM_FILE_PINNED to SRM_RELEASED.", count); + } + + /* + * in order to enhance performance here we can check if there is any file system with tape + * (T1D0, T1D1), if there is not any we can skip the following + */ + + /* Find all not expired SURLs from PtG and BoL */ + + findPtgPinnedSurls = con.prepareStatement(SELECT_PTG_PINNED_SURLS); + findPtgPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + ptgPinnedSurls = findPtgPinnedSurls.executeQuery(); + + while (ptgPinnedSurls.next()) { + String sourceSURL = ptgPinnedSurls.getString("rg.sourceSURL"); + Integer uniqueID = Integer.valueOf(ptgPinnedSurls.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (ptgPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); + } + } + pinnedSurlSet.add(uniqueID); + } + + // SURLs pinned by BoLs + findBolPinnedSurls = con.prepareStatement(SELECT_BOL_PINNED_SURLS); + findBolPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_SUCCESS)); + bolPinnedSurls = findBolPinnedSurls.executeQuery(); + + while (bolPinnedSurls.next()) { + String sourceSURL = bolPinnedSurls.getString("rb.sourceSURL"); + Integer uniqueID = Integer.valueOf(bolPinnedSurls.getInt("rb.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (bolPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. " + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); + } + } + pinnedSurlSet.add(uniqueID); + } + + con.commit(); + } catch (SQLException e) { + log.error("PtGChunkDAO! SQLException. 
{}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(findExpired); + closeStatement(updateExpired); + closeStatement(findPtgPinnedSurls); + closeStatement(findBolPinnedSurls); + closeResultSet(expired); + closeResultSet(ptgPinnedSurls); + closeResultSet(bolPinnedSurls); + closeConnection(con); + } + + Collection expiredSurlList = Lists.newArrayList(); + /* Remove the Extended Attribute pinned if there is not a valid SURL on it */ + TSURL surl; + for (Entry surlEntry : expiredSurlMap.entrySet()) { + if (!pinnedSurlSet.contains(surlEntry.getValue())) { + try { + surl = TSURL.makeFromStringValidate(surlEntry.getKey()); + } catch (InvalidTSURLAttributesException e) { + log.error("Invalid SURL, cannot release the pin " + "(Extended Attribute): {}", + surlEntry.getKey()); + continue; + } + expiredSurlList.add(surl); + StoRI stori; + try { + stori = Namespace.getInstance().resolveStoRIbySURL(surl); + } catch (Throwable e) { + log.error("Invalid SURL {} cannot release the pin. {}: {}", surlEntry.getKey(), + e.getClass().getCanonicalName(), e.getMessage(), e); + continue; + } + + if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + StormEA.removePinned(stori.getAbsolutePath()); + } + } + } + return expiredSurlList; + } + + /** + * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. An array of long + * representing the primary key of each chunk is required: only they get the status changed + * provided their current status is SRM_FILE_PINNED. + * + * This method is used during srmReleaseFiles + * + * In case of any error nothing happens and no exception is thrown, but proper messagges get + * logged. + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { + + String str = "UPDATE status_Get sg SET sg.statusCode=? " + + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " + + "from SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks" + " from SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * @param ids + * @param token + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token) { + + if (token == null) { + transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); + return; + } + + /* + * If a request token has been specified, only the related Get requests have to be released. + * This is done adding the r.r_token="..." clause in the where subquery. + */ + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" + token.getValue() + + "' AND rg.ID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was" + + " transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " + + "SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks from " + "SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" + requestToken.toString() + + "' AND ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + stmt.setString(2, (explanation != null ? explanation : "")); + log.trace("PtG CHUNK DAO - updateStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", statusCode); + } else { + log.info("PtG CHUNK DAO! 
{} chunks of PtG requests were updated to {}.", count, statusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized void doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + + String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + "SET sg.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sg.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns a String containing all IDs. + */ + private String makeWhereString(long[] rowids) { + + StringBuilder sb = new StringBuilder("("); + int n = rowids.length; + for (int i = 0; i < n; i++) { + sb.append(rowids[i]); + if (i < (n - 1)) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURL's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExpainationSet(String explanation) { + + return " sg.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java new file mode 100644 index 000000000..6a1485b91 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java @@ -0,0 +1,785 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class PtPChunkDAOMySql extends AbstractDAO implements PtPChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtPChunkDAOMySql.class); + + private static final String UPDATE_REQUEST_PUT_WHERE_ID_IS = "UPDATE " + + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " + + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " + + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, " + + "rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " + + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? " + "WHERE rp.ID=?"; + + private static final String UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS = + "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS = + "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE rq.r_token=? AND sp.statusCode<>?"; + + private static final String UPDATE_STATUS_PUT_WHERE_ID_IS = + "UPDATE status_Put sp SET sp.statusCode=?, sp.explanation=? 
WHERE sp.request_PutID=?"; + + private static final String SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS = + "SELECT rp.ID, rp.targetSURL " + + "FROM status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static PtPChunkDAO instance; + + public static synchronized PtPChunkDAO getInstance() { + if (instance == null) { + instance = new PtPChunkDAOMySql(); + } + return instance; + } + + private StatusCodeConverter statusCodeConverter; + + private PtPChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to save the changes made to a retrieved PtPChunkDataTO, back into the MySQL DB. + * Only the transferURL, statusCode and explanation, of status_Put table get written to the DB. + * Likewise for the pinLifetime and fileLifetime of request_queue. In case of any error, an error + * message gets logged but no exception is thrown. + */ + public synchronized void update(PtPChunkDataTO to) { + + Connection con = null; + PreparedStatement updatePut = null; + try { + con = getConnection(); + updatePut = con.prepareStatement(UPDATE_REQUEST_PUT_WHERE_ID_IS); + + updatePut.setString(1, to.transferURL()); + updatePut.setInt(2, to.status()); + updatePut.setString(3, to.errString()); + updatePut.setInt(4, to.pinLifetime()); + updatePut.setInt(5, to.fileLifetime()); + updatePut.setString(6, to.fileStorageType()); + updatePut.setString(7, to.overwriteOption()); + updatePut.setString(8, to.normalizedStFN()); + updatePut.setInt(9, to.surlUniqueID()); + updatePut.setLong(10, to.primaryKey()); + // run updateStatusPut... + log.trace("PtP CHUNK DAO - update method: {}", updatePut); + updatePut.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updatePut); + closeConnection(con); + } + } + + /** + * Updates the request_Put represented by the received ReducedPtPChunkDataTO by setting its + * normalized_targetSURL_StFN and targetSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS); + stmt.setString(1, chunkTO.normalizedStFN()); + stmt.setInt(2, chunkTO.surlUniqueID()); + stmt.setLong(3, chunkTO.primaryKey()); + log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); + stmt.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtPChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_Put and status_Put. The considered fields are: (1) From status_Put: the ID field which + * becomes the TOs primary key, and statusCode. (2) From request_Put: targetSURL and + * expectedFileSize. (3) From request_queue: pinLifetime, fileLifetime, config_FileStorageTypeID, + * s_token, config_OverwriteID. In case of any error, a log gets written and an empty collection + * is returned. No exception is returned. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ * This is important because this method is intended to be used by the Feeders to fetch all chunks + * in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS); + + findProtocols.setString(1, requestToken.getValue()); + + log.trace("PtP CHUNK DAO - find method: {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + + List protocols = Lists.newArrayList(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + // get chunks of the request + findRequest = con.prepareStatement(SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PtP CHUNK DAO - find method: {}", findRequest); + rsRequest = findRequest.executeQuery(); + + while (rsRequest.next()) { + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rsRequest.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rsRequest.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rsRequest.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rsRequest.getString("rq.s_token")); + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. 
The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rsRequest.getLong("rp.ID")); + chunkDataTO.setToSURL(rsRequest.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rsRequest.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rsRequest.getInt("rp.targetSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rsRequest.getLong("rp.expectedFileSize")); + chunkDataTO.setProtocolList(protocols); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setStatus(rsRequest.getInt("sp.statusCode")); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. This method attempts to change the + * status of the chunk to SRM_FAILURE and record it in the DB, in the status_Put table. This + * operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messages were recorded. 
Yet it soon became clear + * that the source of malformed data were actually the clients themselves and/or FE recording in + * the DB. In these circumstances the client would find its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. + */ + public synchronized int fail(PtPChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement signal = null; + int updated = 0; + + try { + con = getConnection(); + signal = con.prepareStatement(UPDATE_STATUS_PUT_WHERE_ID_IS); + signal.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + signal.setString(2, "This chunk of the request is malformed!"); + signal.setLong(3, auxTO.primaryKey()); + log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); + updated = signal.executeUpdate(); + } catch (SQLException e) { + log.error( + "PtPChunkDAO! Unable to signal in DB that a chunk of " + + "the request was malformed! Request: {}; Error: {}", + auxTO.toString(), e.getMessage(), e); + e.printStackTrace(); + updated = 0; + } finally { + closeStatement(signal); + closeConnection(con); + } + return updated; + } + + /** + * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
+ * + * @return a Map containing the ID of the request as key and the relative SURL as value + */ + public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { + + return getExpired(SRM_SPACE_AVAILABLE); + } + + public synchronized Map getExpired(TStatusCode status) { + + Map expiredRequests = Maps.newHashMap(); + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS); + find.setInt(1, statusCodeConverter.toDB(status)); + log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", find); + rs = find.executeQuery(); + while (rs.next()) { + expiredRequests.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); + } + + } catch (SQLException e) { + + log.error("PtPChunkDAO! Unable to select expired " + + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return expiredRequests; + } + + /** + * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_FILE_LIFETIME_EXPIRED. An + * array of Long representing the primary key of each chunk is required. This is needed when the + * client forgets to invoke srmPutDone(). In case of any error or exception, the returned int + * value will be zero or less than the input List size. + * + * @param the list of the request id to update + * + * @return The number of the updated records into the db + */ + public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( + Collection ids) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + String querySQL = "UPDATE status_Put sp " + + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=?, sp.explanation=? " + + "WHERE sp.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + if (!ids.isEmpty()) { + querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); + stmt.setString(2, "Expired pinLifetime"); + stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); + + log.trace("PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", + stmt); + + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); + return count; + } + + public synchronized int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, String explanation) { + + String sql = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND)"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setInt(1, statusCodeConverter.toDB(status)); + stmt.setInt(2, statusCodeConverter.toDB(status)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + stmt.setLong(5, expirationTime); + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to {}: {}", status, stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatus(Collection ids, TStatusCode fromStatus, + TStatusCode toStatus, String explanation) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + if (ids.isEmpty()) { + return 0; + } + + String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(toStatus)); + stmt.setInt(2, statusCodeConverter.toDB(toStatus)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(fromStatus)); + int i = 5; + for (Long id : ids) { + stmt.setLong(i, id); + i++; + } + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); + return count; + } + + public synchronized int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, explanation, true, true); + } + + private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation, boolean withRequestToken, + boolean withExplaination) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplaination && explanation == null)) { + throw new IllegalArgumentException( + "Unable to perform the updateStatus, " + "invalid arguments: withRequestToken=" + + withRequestToken + " requestToken=" + requestToken + " withExplaination=" + + withExplaination + " explaination=" + explanation); + } + + String str = + "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " + + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; + if (withExplaination) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE "; + if (withRequestToken) { + str += buildTokenWhereClause(requestToken) + " AND "; + } + str += " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + + log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", statusCode); + } else { + log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "to {}.", count, + statusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, + newStatusCode, explanation, true, false, true); + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || surlsUniqueIDs == null + || surls == null || 
surlsUniqueIDs.length == 0 || surls.length == 0 + || surlsUniqueIDs.length != surls.length) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + "surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls); + } + return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode, null, true, true, false); + } + + private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation, + boolean withRequestToken, boolean withSurls, boolean withExplanation) { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlsUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls + " withExplaination=" + withExplanation + " explanation=" + + explanation); + } + + String str = "UPDATE " + + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sp.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from {} to {}! Error: {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length == 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + 
surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + // get chunks of the request + String str = + "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " + + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " + + "sp.statusCode " + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + + con = getConnection(); + find = con.prepareStatement(str); + + List list = Lists.newArrayList(); + + log.trace("PtP CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rs.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); + chunkDataTO.setClientDN(rs.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. 
+ */ + java.sql.Blob blob = rs.getBlob("rq.proxy"); + if (!rs.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); + chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rs.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setStatus(rs.getInt("sp.statusCode")); + list.add(chunkDataTO); + } + return list; + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + /* return empty Collection! */ + return Lists.newArrayList(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + private String buildExpainationSet(String explanation) { + + return " sp.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all Surls. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java new file mode 100644 index 000000000..412dde227 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java @@ -0,0 +1,906 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.EMPTY; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.Iterator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for RequestSummaryCatalog. This DAO is specifically designed to connect to a MySQL DB. + * + * @author EGRID ICTP + * @version 3.0 + * @date May 2005 + */ +public class RequestSummaryDAOMySql extends AbstractDAO implements RequestSummaryDAO { + + private static final Logger log = LoggerFactory.getLogger(RequestSummaryDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT = + "SELECT ID, config_RequestTypeID, r_token, timeStamp, client_dn, proxy " + + "FROM request_queue WHERE status=? 
LIMIT ?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_ID_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE ID=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS = + "UPDATE request_queue " + + "SET status=?, errstring=?, pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " + + "WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_IS = + "SELECT ID, config_RequestTypeID from request_queue WHERE r_token=?"; + + private static final String SELECT_FULL_REQUEST_WHERE_TOKEN_IS = + "SELECT * from request_queue WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_AND_STATUS = + "SELECT ID, config_RequestTypeID FROM request_queue WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS = "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS = "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS = "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? 
WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN "; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND targetSURL IN "; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN = + "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN "; + + private static final String SELECT_PURGEABLE_REQUESTS_WITH_LIMIT = + "SELECT ID, r_token FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; + + private static final String COUNT_PURGEABLE_REQUESTS = "SELECT count(*) FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? 
"; + + private static final String DELETE_ORPHANS_DIR_OPTION = + "DELETE request_DirOption FROM request_DirOption " + + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" + + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " + + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" + + " WHERE request_Copy.request_DirOptionID IS NULL AND" + + " request_Get.request_DirOptionID IS NULL AND" + + " request_BoL.request_DirOptionID IS NULL;"; + + private static RequestSummaryDAO instance; + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + private final int MAX_FETCHED_REQUESTS = + StormConfiguration.getInstance().getPickingMaxBatchSize(); + + public static synchronized RequestSummaryDAO getInstance() { + if (instance == null) { + instance = new RequestSummaryDAOMySql(); + } + return instance; + } + + private RequestSummaryDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved requests are limited + * to the number specified by the Configuration method getPicker2MaxBatchSize. All retrieved + * requests get their global status transited to SRM_REQUEST_INPROGRESS. A Collection of + * RequestSummaryDataTO is returned: if none are found, an empty collection is returned. + */ + public synchronized Collection fetchNewRequests(int limit) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement update = null; + ResultSet fetched = null; + Collection results = Lists.newArrayList(); + int howMuch = limit > MAX_FETCHED_REQUESTS ? 
MAX_FETCHED_REQUESTS : limit; + + try { + con = getManagedConnection(); + + // get id, request type, request token and client_DN of newly added + // requests, which must be in SRM_REQUEST_QUEUED state + fetch = con.prepareStatement(SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT); + fetch.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(2, howMuch); + fetched = fetch.executeQuery(); + + Collection rowids = Lists.newArrayList(); + + while (fetched.next()) { + long id = fetched.getLong("ID"); + rowids.add(Long.valueOf(id)); + RequestSummaryDataTO aux = new RequestSummaryDataTO(); + aux.setPrimaryKey(id); + aux.setRequestType(fetched.getString("config_RequestTypeID")); + aux.setRequestToken(fetched.getString("r_token")); + aux.setClientDN(fetched.getString("client_dn")); + aux.setTimestamp(fetched.getTimestamp("timeStamp")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = fetched.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + aux.setVomsAttributes(new String(bdata)); + } + + results.add(aux); + } + + // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS + if (!results.isEmpty()) { + String updateQuery = + "UPDATE request_queue SET status=?, errstring=? 
WHERE ID IN " + makeWhereString(rowids); + update = con.prepareStatement(updateQuery); + update.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + update.setString(2, "Request handled!"); + log.trace("REQUEST SUMMARY DAO - findNew: executing {}", update); + update.executeUpdate(); + } + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " + + "Error: {}. Rolling back!", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + + } finally { + closeResultSet(fetched); + closeStatement(fetch); + closeStatement(update); + closeConnection(con); + } + + return results; + } + + /** + * Method used to signal in the DB that a request failed: the status of the request identified by + * the primary key index is transited to SRM_FAILURE, with the supplied explanation String. The + * supplied index is the primary key of the global request. In case of any error, nothing gets + * done and no exception is thrown, but proper error messages get logged. + */ + public void failRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + ps.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + ps.setString(2, explanation); + ps.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failRequest executing: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " + + "ID {} to SRM_FAILURE! Error: {}", requestId, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtGRequest failed. 
The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messages get logged. + */ + public void failPtGRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtG request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtPRequest failed. The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. 
The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messagges get logged. + */ + public void failPtPRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtP request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String. If the supplied request + * token does not exist, nothing happens. 
+ */ + public void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + public void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE); + update.setInt(1, statusCodeConverter.toDB(newStatusCode)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + update.setInt(4, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: executing {}", + update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String and pin and file + * lifetimes are updated in order to start the countdown from now. If the supplied request token + * does not exist, nothing happens. 
+ */ + public void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_QUEUED state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_QUEUED + * state, then nothing happens. 
+ */ + public void abortRequest(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + update = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } else { + con.rollback(); + } + } catch (SQLException e) { + + log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + closeResultSet(rs); + 
closeStatement(update); + closeStatement(query); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_INPROGRESS state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_INPROGRESS + * state, then nothing happens. + */ + public void abortInProgressRequest(TRequestToken rt) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, rt.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + // token found... + // get ID + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update global request status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateReq.setString(2, "User aborted request!"); + updateReq.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateReq); + updateReq.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + updateChunk = 
con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + updateChunk = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + } + updateChunk.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateChunk.setString(2, "User aborted request!"); + updateChunk.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateChunk); + updateChunk.executeUpdate(); + } else { + con.rollback(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to transit the status of chunks of a request that is in SRM_REQUEST_INPROGRESS + * state, to SRM_ABORTED. If the supplied token is null, or not found, or not in the + * SRM_REQUEST_INPROGRESS state, then nothing happens. 
+ */ + public void abortChunksOfInProgressRequest(TRequestToken requestToken, Collection surls) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + String updateQuery; + if (PREPARE_TO_GET.equals(rtyp)) { + updateQuery = UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + updateQuery = UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } else { + updateQuery = UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN + makeInString(surls); + } + update = con.prepareStatement(updateQuery); + } + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + 
closeStatement(update); + closeConnection(con); + } + } + + /** + * Private method that returns a String of all SURLS in the collection of String. + */ + private String makeInString(Collection c) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. + */ + public TRequestType getRequestType(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + TRequestType result = EMPTY; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); + rs = query.executeQuery(); + if (rs.next()) { + result = requestTypeConverter.toSTORM(rs.getString("config_RequestTypeID")); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return result; + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. 
+ */ + public RequestSummaryDataTO find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + RequestSummaryDataTO to = null; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_FULL_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + rs = query.executeQuery(); + + if (rs.first()) { + to = new RequestSummaryDataTO(); + to.setPrimaryKey(rs.getLong("ID")); + to.setRequestType(rs.getString("config_RequestTypeID")); + to.setClientDN(rs.getString("client_dn")); + to.setUserToken(rs.getString("u_token")); + to.setRetrytime(rs.getInt("retrytime")); + to.setPinLifetime(rs.getInt("pinLifetime")); + to.setSpaceToken(rs.getString("s_token")); + to.setStatus(rs.getInt("status")); + to.setErrstring(rs.getString("errstring")); + to.setRequestToken(rs.getString("r_token")); + to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); + to.setFileLifetime(rs.getInt("fileLifetime")); + to.setNbreqfiles(rs.getInt("nbreqfiles")); + to.setNumOfCompleted(rs.getInt("numOfCompleted")); + to.setNumOfWaiting(rs.getInt("numOfWaiting")); + to.setNumOfFailed(rs.getInt("numOfFailed")); + to.setTimestamp(rs.getTimestamp("timeStamp")); + + java.sql.Blob blob = rs.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + to.setVomsAttributes(new String(bdata)); + } + to.setDeferredStartTime(rs.getInt("deferredStartTime")); + to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); + + if (rs.next()) { + log.warn("More than a row matches token {}", requestToken); + } + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return to; + } + + /** + * Method that purges expired requests: it only removes up to a fixed value of expired requests at + * a time. 
The value is configured and obtained from the configuration property getPurgeBatchSize. + * A List of Strings with the request tokens removed is returned. In order to completely remove + * all expired requests, simply keep invoking this method until an empty List is returned. This + * batch processing is needed because there could be millions of expired requests which are likely + * to result in out-of-memory problems. Notice that in case of errors only error messages get + * logged. An empty List is also returned. + */ + public Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement deleteReq = null; + PreparedStatement deleteOrphans = null; + ResultSet rs = null; + Collection requestTokens = Lists.newArrayList(); + + try { + // start transaction + con = getManagedConnection(); + + fetch = con.prepareStatement(SELECT_PURGEABLE_REQUESTS_WITH_LIMIT); + fetch.setLong(1, expiredRequestTime); + fetch.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + fetch.setInt(4, purgeSize); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", fetch); + rs = fetch.executeQuery(); + + Collection ids = Lists.newArrayList(); + + while (rs.next()) { + requestTokens.add(rs.getString("r_token")); + ids.add(Long.valueOf(rs.getLong("ID"))); + } + + if (!ids.isEmpty()) { + // REMOVE BATCH OF EXPIRED REQUESTS! 
+ + String deleteQuery = "DELETE FROM request_queue WHERE ID in " + makeWhereString(ids); + deleteReq = con.prepareStatement(deleteQuery); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteReq); + + int deleted = deleteReq.executeUpdate(); + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} expired requests.", + deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted expired requests."); + } + + deleteOrphans = con.prepareStatement(DELETE_ORPHANS_DIR_OPTION); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteOrphans); + deleted = deleteOrphans.executeUpdate(); + + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " + + "DirOption related to expired requests.", deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " + + "DirOption related to expired requests."); + } + } + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(fetch); + closeStatement(deleteReq); + closeStatement(deleteOrphans); + closeConnection(con); + } + return requestTokens; + } + + /** + * Retrieve the total number of expired requests. 
+ * + * @return + */ + public int getNumberExpired() { + + int rowCount = 0; + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + try { + // start transaction + con = getConnection(); + + ps = con.prepareStatement(COUNT_PURGEABLE_REQUESTS); + ps.setLong(1, StormConfiguration.getInstance().getExpiredRequestTime()); + ps.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + ps.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + + log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); + rs = ps.executeQuery(); + + // Get the number of rows from the result set + if (rs.next()) { + rowCount = rs.getInt(1); + } + log.debug("Nr of expired requests is: {}", rowCount); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + + return rowCount; + + } + + /** + * Private method that returns a String of all IDs retrieved by the last SELECT. + */ + private String makeWhereString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java similarity index 58% rename from src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java rename to src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java index 088641b38..7d1592072 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java @@ -1,14 +1,29 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.catalogs.surl; +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.StatusCodeConverter; -import it.grid.storm.catalogs.StoRMDataSource; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TRequestType; @@ -18,41 +33,46 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +public class SURLStatusDAOMySql extends AbstractDAO implements SURLStatusDAO { -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + public static final Logger LOGGER = LoggerFactory.getLogger(SURLStatusDAOMySql.class); -public class SURLStatusDAO { + private static SURLStatusDAO instance; - public static final Logger LOGGER = LoggerFactory - 
.getLogger(SURLStatusDAO.class); + public static synchronized SURLStatusDAO getInstance() { + if (instance == null) { + instance = new SURLStatusDAOMySql(); + } + return instance; + } - public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + private final StatusCodeConverter converter; + private final RequestSummaryCatalog requestSummaryCatalog; + private final PtPChunkCatalog ptpChunkCatalog; + + private SURLStatusDAOMySql() { + super(StormDbConnectionPool.getInstance()); + converter = StatusCodeConverter.getInstance(); + requestSummaryCatalog = RequestSummaryCatalog.getInstance(); + ptpChunkCatalog = PtPChunkCatalog.getInstance(); + } + + public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " - + "WHERE rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " - + "AND (sg.statusCode=22 OR sg.statusCode=17) "; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " + + "WHERE rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? 
" + + "AND (sg.statusCode=22 OR sg.statusCode=17) "; if (user != null) { query += "AND rq.client_dn = ?"; @@ -67,43 +87,39 @@ public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); - LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); - - return (updateCount != 0); + updateCount = stat.executeUpdate(); + LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtGsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtGsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } + return (updateCount != 0); } - public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) " - + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " - + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " - + "AND (sp.statusCode=24 OR sp.statusCode=17)"; + String query = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) " + + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " + + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? 
" + + "AND (sp.statusCode=24 OR sp.statusCode=17)"; if (user != null) { query += "AND rq.client_dn = ?"; @@ -118,36 +134,30 @@ public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); - - LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); + updateCount = stat.executeUpdate(); - return (updateCount != 0); + LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtPsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtPsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - + return (updateCount != 0); } - private Map buildStatusMap(ResultSet rs) - throws SQLException { + private Map buildStatusMap(ResultSet rs) throws SQLException { if (rs == null) { throw new IllegalArgumentException("rs cannot be null"); } Map statusMap = new HashMap(); - StatusCodeConverter converter = StatusCodeConverter.getInstance(); while (rs.next()) { TSURL surl = surlFromString(rs.getString(1)); TStatusCode sc = converter.toSTORM(rs.getInt(2)); @@ -159,42 +169,8 @@ private Map buildStatusMap(ResultSet rs) } - private void closeConnection(Connection conn) { - - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - LOGGER.error("Error closing connection: {}.", e.getMessage(), e); - } - } - } - - private void closeResultSet(ResultSet rs) { - - if (rs != null) { - - try { - rs.close(); - } catch (SQLException e) { - LOGGER.error("Error closing result set: {}", e.getMessage(), e); - } - } - } - - private void closeStatetement(Statement stat) { - - if (stat != null) { - try { - stat.close(); - } catch (SQLException e) { - 
LOGGER.error("Error closing statement: {}.", e.getMessage(), e); - } - } - } - - private Map filterSURLStatuses( - Map statuses, List surls) { + private Map filterSURLStatuses(Map statuses, + List surls) { if (surls == null) { return statuses; @@ -213,8 +189,8 @@ private Map filterSURLStatuses( // Add a failure state for the surls that were // requested but are not linked to the token for (TSURL s : surlsCopy) { - statuses.put(s, new TReturnStatus(TStatusCode.SRM_FAILURE, - "SURL not linked to passed request token.")); + statuses.put(s, + new TReturnStatus(TStatusCode.SRM_FAILURE, "SURL not linked to passed request token.")); } return statuses; @@ -227,47 +203,40 @@ private Map getBoLSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = null; try { con = getConnection(); String query = "SELECT rb.sourceSURL, sb.statusCode " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getBoLSURLStatuses: SQL error: %s", - e.getMessage()); - + String msg = String.format("getBoLSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + result = Maps.newHashMap(); } finally { - closeStatetement(stat); + closeResultSet(rs); + closeStatement(stat); closeConnection(con); } + return result; } - private Connection getConnection() throws SQLException { - - if (StoRMDataSource.getInstance() == null) { - throw new IllegalStateException("SToRM Data source not initialized!"); - } - return StoRMDataSource.getInstance().getConnection(); - } - - public Map getPinnedSURLsForUser( - GridUserInterface user, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + List surls) { if (user == null) { throw new NullPointerException("getPinnedSURLsForUser: null user!"); @@ -276,23 +245,22 @@ public Map getPinnedSURLsForUser( ResultSet rs = null; PreparedStatement stat = null; Connection con = null; - - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -302,22 +270,25 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + + return result; } - public Map getPinnedSURLsForUser( - GridUserInterface user, TRequestToken token, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + TRequestToken token, List surls) { userSanityChecks(user); tokenSanityChecks(token); @@ -327,22 +298,22 @@ public Map getPinnedSURLsForUser( PreparedStatement stat = null; Connection con = null; - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); stat.setString(2, token.getValue()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -352,18 +323,20 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtGSURLStatuses(TRequestToken token) { @@ -374,30 +347,34 @@ private Map getPtGSURLStatuses(TRequestToken token) { PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); + try { con = getConnection(); String query = "SELECT rg.sourceSURL, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtGSURLStatuses: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPtGSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtPSURLStatuses(TRequestToken token) { @@ -407,39 +384,39 @@ private Map getPtPSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rp.targetSURL, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtPSURLStatuses: SQL error: %s", - e.getMessage()); + String msg = String.format("getPtPSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } public Map getSURLStatuses(TRequestToken token) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); if (rt.isEmpty()) throw new UnknownTokenException(token.getValue()); @@ -448,41 +425,38 @@ public Map getSURLStatuses(TRequestToken token) { throw new ExpiredTokenException(token.getValue()); switch (rt) { - case PREPARE_TO_GET: - return getPtGSURLStatuses(token); + case PREPARE_TO_GET: + return getPtGSURLStatuses(token); - case PREPARE_TO_PUT: - return getPtPSURLStatuses(token); + case PREPARE_TO_PUT: + return getPtPSURLStatuses(token); - case BRING_ON_LINE: - return getBoLSURLStatuses(token); + case BRING_ON_LINE: + return getBoLSURLStatuses(token); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } - public Map getSURLStatuses(TRequestToken token, - List surls) { + public Map getSURLStatuses(TRequestToken token, List surls) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); switch (rt) { - case PREPARE_TO_GET: - return filterSURLStatuses(getPtGSURLStatuses(token), surls); + case PREPARE_TO_GET: + return 
filterSURLStatuses(getPtGSURLStatuses(token), surls); - case PREPARE_TO_PUT: - return filterSURLStatuses(getPtPSURLStatuses(token), surls); + case PREPARE_TO_PUT: + return filterSURLStatuses(getPtPSURLStatuses(token), surls); - case BRING_ON_LINE: - return filterSURLStatuses(getBoLSURLStatuses(token), surls); + case BRING_ON_LINE: + return filterSURLStatuses(getBoLSURLStatuses(token), surls); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } @@ -491,9 +465,8 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { tokenSanityChecks(token); surlSanityChecks(surls); - // I am not reimplementing the whole catalog functions - return PtPChunkCatalog.getInstance().updateFromPreviousStatus(token, surls, - TStatusCode.SRM_SPACE_AVAILABLE, TStatusCode.SRM_SUCCESS); + // I am not re-implementing the whole catalog functions + return ptpChunkCatalog.updateFromPreviousStatus(token, surls, SRM_SPACE_AVAILABLE, SRM_SUCCESS); } @@ -538,25 +511,25 @@ public void releaseSURL(TSURL surl) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21" - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21" + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " + + "AND rg.sourceSURL = ? 
and rg.sourceSURL_uniqueID = ?"; stat = con.prepareStatement(query); stat.setString(1, surl.getSURLString()); stat.setInt(2, surl.uniqueId()); stat.executeUpdate(); + } catch (SQLException e) { + String msg = String.format("releaseSURL: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -572,14 +545,11 @@ public void releaseSURLs(GridUserInterface user, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.client_dn = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.client_dn = ?"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); @@ -588,15 +558,15 @@ public void releaseSURLs(GridUserInterface user, List surls) { LOGGER.debug("releaseSURLs: released {} surls", releasedSURLsCount); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - } public void releaseSURLs(List surls) { @@ -609,24 +579,22 @@ public void releaseSURLs(List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON 
sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ")"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + ")"; stat = con.prepareStatement(query); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -642,26 +610,24 @@ public void releaseSURLs(TRequestToken token, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.r_token = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.r_token = ?"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new 
RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -673,8 +639,7 @@ private TSURL surlFromString(String s) { return TSURL.makeFromStringWellFormed(s); } catch (InvalidTSURLAttributesException e) { - throw new IllegalArgumentException("Error creating surl from string: " - + s, e); + throw new IllegalArgumentException("Error creating surl from string: " + s, e); } } @@ -685,6 +650,7 @@ public boolean surlHasOngoingPtGs(TSURL surl) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { con = getConnection(); @@ -692,27 +658,28 @@ public boolean surlHasOngoingPtGs(TSURL surl) { // We basically check whether there are active requests // that have the SURL in SRM_FILE_PINNED status String query = "SELECT rq.ID, rg.ID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " - + "WHERE ( rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " - + "and sg.statusCode = 22 )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " + + "WHERE ( rg.sourceSURL_uniqueID = ? 
and sg.statusCode = 22 )"; stat = con.prepareStatement(query); - stat.setString(1, surl.getSURLString()); - stat.setInt(2, surl.uniqueId()); + stat.setInt(1, surl.uniqueId()); rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtGs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtGs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { @@ -722,6 +689,7 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { @@ -729,36 +697,36 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { // We basically check whether there are active requests // that have the SURL in SRM_SPACE_AVAILABLE status String query = "SELECT rq.ID, rp.ID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " - + "and sp.statusCode=24 )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL_uniqueID = ? and sp.statusCode = 24 )"; if (ptpRequestToken != null) { - query += " AND rq.r_token != ?"; + query += " AND ( rq.r_token != ? 
)"; } stat = con.prepareStatement(query); - stat.setString(1, surl.getSURLString()); - stat.setInt(2, surl.uniqueId()); + stat.setInt(1, surl.uniqueId()); if (ptpRequestToken != null) { - stat.setString(3, ptpRequestToken.getValue()); + stat.setString(2, ptpRequestToken.getValue()); } rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtPs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtPs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } private void surlSanityChecks(List surls) { diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java index d8d764ac4..2f7561813 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java @@ -4,14 +4,6 @@ */ package it.grid.storm.persistence.impl.mysql; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.AbstractDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -23,604 +15,588 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * find = con.prepareStatement( - * "SELECT storm_get_filereq.rowid, storm_req.r_token, storm_get_filereq.from_surl, storm_get_filereq.lifetime, storm_get_filereq.s_token, storm_get_filereq.flags, 
storm_req.protocol, storm_get_filereq.actual_size, storm_get_filereq.status, storm_get_filereq.errstring, storm_get_filereq.pfn FROM storm_get_filereq, storm_req WHERE storm_get_filereq.r_token=storm_req.r_token AND storm_get_filereq.r_token=?" - * ); - **/ - -public class StorageSpaceDAOMySql extends AbstractDAO implements - StorageSpaceDAO { - - private static final Logger log = LoggerFactory - .getLogger(StorageSpaceDAOMySql.class); - - private StorageSpaceSQLHelper helper; - - /** - * CONSTRUCTOR - */ - public StorageSpaceDAOMySql() { - - helper = new StorageSpaceSQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - /** - * addStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - - public void addStorageSpace(StorageSpaceTO ss) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.insertQuery(conn, ss); - log.info("INSERT query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("INSERT result = {}", res); - if (res <= 0) { - log - .error("No row inserted for statement : {}", prepStatement.toString()); - throw new DataAccessException("No rows inserted for Storage Space"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * getStorageSpaceById - * - * @param ssId - * Long - * @return StorageSpace - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceById(Long ssId) - throws DataAccessException { - - throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); - } - - public Collection findAll() throws DataAccessException { - - throw new DataAccessException("findAll: Unimplemented method!"); - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'user' and with the - * 
specified alias ('spaceAlias'). 'spaceAlias' can be NULL or empty and in - * these cases a Collection of all the StorageSpaceTO owned by 'user' is - * returned. - * - * @param owner - * VomsGridUser. - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. - * @throws DataAccessException - */ - public Collection getStorageSpaceByOwner( - GridUserInterface owner, String spaceAlias) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasQuery(conn, owner, spaceAlias); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}", res); - if (res.first() == false) { - log.debug("No rows found for query : {}", prepStatement.toString()); - } else { - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'VO'. - * - * @param voname - * Vo. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - - public Collection getStorageSpaceBySpaceType(String stype) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - PreparedStatement prepStatement = null; - - Connection conn = getConnection(); - ResultSet res = null; - - try { - prepStatement = helper.selectBySpaceType(conn, stype); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}", res); - if (res.first() == false) { - log.info("No rows found for query : {}", prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO with the specified alias - * ('spaceAlias'). 'spaceAlias' can not be be NULL or empty. - * - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - public Collection getStorageSpaceByAliasOnly(String spaceAlias) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasOnlyQuery(conn, spaceAlias); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}" , res); - - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * getStorageSpaceByToken - * - * @param token - * TSpaceToken - * @return StorageSpace , null if not row found on that token - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceByToken(String token) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - try { - prepStatement = helper.selectByTokenQuery(conn, token); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // take the first - ssTO = helper.makeStorageSpaceTO(res); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return 
ssTO; - } - - @Override - public Collection getStorageSpaceByUnavailableUsedSpace( - long unavailableSizeValue) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByUnavailableUsedSpaceSizeQuery(conn, - unavailableSizeValue); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public Collection getStorageSpaceByPreviousLastUpdate( - Date lastUpdateTimestamp) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByPreviousOrNullLastUpdateQuery(conn, - lastUpdateTimestamp.getTime()); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - 
releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * removeStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(GridUserInterface user, String spaceToken) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, user, spaceToken); - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - if (res <= 0) { - log.error("Error removing Storage Space with token = {} for " - + "user {} not found", spaceToken, user.getDn()); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' for user '" + user.getDn() + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * removeStorageSpace only by spaceToken - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(String spaceToken) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, spaceToken); - - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - - if (res <= 0) { - log.error("Error removing Storage Space with token = {}. 
Space not found", - spaceToken); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByAliasAndTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated by " - + "query : {}. 
updated {} rows.", - prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - long freeSpace = ssTO.getFreeSize(); - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateFreeSpaceByTokenQuery(conn, - ssTO.getSpaceToken(), freeSpace, new Date()); - - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}", res); - if (res <= 0) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateAllStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated " - + "by query : {}. 
updated {} rows" - ,prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * Method used to retrieve the set of StorageTO for expired space. - * - * @param long timeInSecond - * @return Collection of transfer object - */ - public Collection getExpired(long currentTimeInSecond) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectExpiredQuery(conn, currentTimeInSecond); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}" , res); - if (res.first() == false) { - log.debug("No rows found for query : {}" , prepStatement.toString()); - throw new DataAccessException("No storage space expired found at time " - + currentTimeInSecond); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.increaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToAdd); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query 
: {}" , prepStatement.toString()); - throw new DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.pool.impl.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; + +public class StorageSpaceDAOMySql extends AbstractDAO implements StorageSpaceDAO { + + private static final Logger log = LoggerFactory.getLogger(StorageSpaceDAOMySql.class); + + private static StorageSpaceDAO instance; + + public static synchronized StorageSpaceDAO getInstance() { + if (instance == null) { + instance = new StorageSpaceDAOMySql(); + } + return instance; + } + + private StorageSpaceSQLHelper helper; + + private StorageSpaceDAOMySql() { + super(StormBeIsamConnectionPool.getInstance()); + helper = new StorageSpaceSQLHelper(); + } + + /** + * addStorageSpace + * + * @param ss StorageSpace + */ + public void addStorageSpace(StorageSpaceTO ss) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.insertQuery(con, ss); + + log.debug("INSERT query = {}", ps); + res = ps.executeUpdate(); + log.debug("INSERT result = {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + if (res <= 0) { + log.error("No rows inserted for Storage Space: {}", ss.toString()); + } + } + + /** + * getStorageSpaceById + * + * @param ssId Long + * @return StorageSpace + * @throws DataAccessException + */ + public 
StorageSpaceTO getStorageSpaceById(Long ssId) throws DataAccessException { + + throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); + } + + public Collection findAll() throws DataAccessException { + + throw new DataAccessException("findAll: Unimplemented method!"); + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'user' and with the specified alias + * ('spaceAlias'). 'spaceAlias' can be NULL or empty and in these cases a Collection of all the + * StorageSpaceTO owned by 'user' is returned. + * + * @param owner VomsGridUser. + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. + */ + public Collection getStorageSpaceByOwner(GridUserInterface owner, + String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasQuery(con, owner, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'VO'. + * + * @param stype. + * @return Collection of StorageSpaceTO. 
+ */ + + public Collection getStorageSpaceBySpaceType(String stype) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceType(con, stype); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * Returns a Collection of StorageSpaceTO with the specified alias ('spaceAlias'). 'spaceAlias' + * can not be be NULL or empty. + * + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. + */ + public Collection getStorageSpaceByAliasOnly(String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasOnlyQuery(con, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * getStorageSpaceByToken + * + * @param token TSpaceToken + * @return StorageSpace , null if not row found on that token + */ + public StorageSpaceTO getStorageSpaceByToken(String token) { + + StorageSpaceTO ssTO = null; + + Connection con = null; + ResultSet res = 
null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByTokenQuery(con, token); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + ssTO = helper.makeStorageSpaceTO(res); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ssTO; + } + + @Override + public Collection getStorageSpaceByUnavailableUsedSpace( + long unavailableSizeValue) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByUnavailableUsedSpaceSizeQuery(con, unavailableSizeValue); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } - - @Override - public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.decreaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToRemove); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query : {}" , prepStatement.toString()); - throw new DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), 
e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; + return result; + } + + @Override + public Collection getStorageSpaceByPreviousLastUpdate(Date lastUpdateTimestamp) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByPreviousOrNullLastUpdateQuery(con, lastUpdateTimestamp.getTime()); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } + return result; + } + + /** + * removeStorageSpace + * + * @param ss StorageSpace + */ + public void removeStorageSpace(GridUserInterface user, String spaceToken) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, user, spaceToken); + log.debug("query = {}", ps); + + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * removeStorageSpace only by spaceToken + * + * @param ss StorageSpace + * @throws DataAccessException + */ + public void removeStorageSpace(String spaceToken) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, spaceToken); + + log.debug("query = {}", ps); + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); 
+ + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByAliasAndTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated by " + "query : {}. updated {} rows.", + ps, res); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + /** + * + * @param ssTO StorageSpaceTO + * @throws DataAccessException + */ + public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) throws DataAccessException { + + long freeSpace = ssTO.getFreeSize(); + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.updateFreeSpaceByTokenQuery(con, ssTO.getSpaceToken(), freeSpace, new Date()); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res <= 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + throw new DataAccessException("Error while executing UPDATE query", e); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateAllStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + 
log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated " + "by query : {}. updated {} rows", ps, + res); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to retrieve the set of StorageTO for expired space. + * + * @param long timeInSecond + * @return Collection of transfer object + */ + public Collection getExpired(long currentTimeInSecond) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = helper.selectExpiredQuery(con, currentTimeInSecond); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + log.debug("No storage space expired found at time " + currentTimeInSecond); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return result; + } + + @Override + public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.increaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToAdd); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { + log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + 
return n; + } + + @Override + public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) + throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.decreaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToRemove); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { + log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return n; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java index ba64a71ab..996b897f9 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java @@ -5,747 +5,707 @@ package it.grid.storm.persistence.impl.mysql; import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.valueOf; - -import com.google.common.collect.Lists; - -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FILE_NAME; +import static 
it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_GROUP_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_PIN_LIFETIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_REQUEST_TYPE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_RETRY_ATTEMPT; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_STATUS; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_USER_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_VO_NAME; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Statement; import java.sql.Timestamp; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.List; +import java.util.Optional; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TapeRecallDAOMySql extends TapeRecallDAO { +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.TapeRecallDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.persistence.pool.impl.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; +import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; + +public class TapeRecallDAOMySql extends AbstractDAO 
implements TapeRecallDAO { + + private static final Logger log = LoggerFactory.getLogger(TapeRecallDAOMySql.class); + + private static TapeRecallDAO instance; + + public static synchronized TapeRecallDAO getInstance() { + if (instance == null) { + instance = new TapeRecallDAOMySql(); + } + return instance; + } + + private final TapeRecallMySQLHelper sqlHelper; + + private TapeRecallDAOMySql() { + + super(StormBeIsamConnectionPool.getInstance()); + sqlHelper = new TapeRecallMySQLHelper(); + } + + @Override + public int getNumberInProgress() throws DataAccessException { + + return getNumberInProgress(null); + } + + @Override + public int getNumberInProgress(String voName) throws DataAccessException { - private static final Logger log = LoggerFactory - .getLogger(TapeRecallDAOMySql.class); - - private final TapeRecallMySQLHelper sqlHelper; - - public TapeRecallDAOMySql() { - - sqlHelper = new TapeRecallMySQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - @Override - public int getNumberInProgress() throws DataAccessException { - - return getNumberInProgress(null); - } - - @Override - public int getNumberInProgress(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberInProgress(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryNumberInProgress(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public int getNumberQueued() throws DataAccessException { - - return getNumberQueued(null); - } - - @Override - public int getNumberQueued(String voName) throws 
DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection); - } else { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public int getReadyForTakeOver() throws DataAccessException { - - return getReadyForTakeOver(null); - } - - @Override - public int getReadyForTakeOver(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryReadyForTakeOver(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryReadyForTakeOver(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public List getGroupTasks(UUID groupTaskId) - throws DataAccessException { - - TapeRecallTO task = null; - List taskList = Lists.newArrayList(); - - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - 
log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } while (res.next()); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { - - boolean response = false; - - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - if (!response) { - log.info("No tasks found with GroupTaskId='{}'",groupTaskId); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException { - - TapeRecallTO task; - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No task found for requestToken={} taskId={}. Query={}", requestToken, taskId, prepStatement); - - throw new DataAccessException("No task found for requestToken=" - + requestToken + " " + "taskId=" + taskId + ". 
Query = " - + prepStatement); - } - task = new TapeRecallTO(); - setTaskInfo(task, res); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return task; - } - - @Override - public boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException { - - boolean response; - - Connection dbConnection = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public UUID insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException { - - if (task.getTaskId() == null || task.getRequestToken() == null - || task.getRequestToken().getValue().trim().isEmpty()) { - log - .error("received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", task.getTaskId(), task.getRequestToken()); - throw new DataAccessException( - "Unable to create insert the task wth the provided UUID and " - + "request token using UUID-namebased algorithm. TaskId = " - + task.getTaskId() + " , request token = " + task.getRequestToken()); - } - Integer status = task.getStatusId(); - - Connection dbConnection = getConnection(); - PreparedStatement prepStat = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! 
{}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - try { - - if (statuses == null || statuses.length == 0) { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId()); - } else { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId(), statuses); - } - log.debug("QUERY: {}", prepStat); - - res = prepStat.executeQuery(); - - if (res.first()) { - /* Take the first, but there can be more than one result */ - String uuidString = res - .getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - status = Integer.valueOf(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setStatusId(status.intValue()); - task.setGroupTaskId(UUID.fromString(uuidString)); - Calendar calendar = new GregorianCalendar(); - try { - task.forceStatusUpdateInstants( - res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), - res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } - } else { - log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); - task.setGroupTaskId(proposedGroupTaskId); - task.setStatusId(status.intValue()); - } - - prepStat = sqlHelper.getQueryInsertTask(dbConnection, task); - if (prepStat == null) { - // this case is possible if and only if the task is null or empty - log.error("Cannot create the query because the task is null or empty."); - throw new DataAccessException( - "Cannot create the query because the task is null or empty."); - } - try { - log.debug("Query(insert-task)={}", prepStat); - prepStat.executeUpdate(); - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query : " - + prepStat + " ; " + e.getMessage(), e); - } - } catch (SQLException e) { - rollback(dbConnection); - throw 
new DataAccessException("Error executing query : " + " ; " - + e.getMessage(), e); - } finally { - releaseConnection(new ResultSet[] { res }, new Statement[] { prepStat }, - dbConnection); - } - return task.getGroupTaskId(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.dao.TapeRecallDAO#purgeCompletedTasks(int) - */ - @Override - public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { - - PreparedStatement ps = null; - Connection con = getConnection(); - - int count = 0; - boolean hasLimit = numTasks > 0; - try { - if (hasLimit) { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); - } else { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); - } - - count = ps.executeUpdate(); - - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " + ps, e); - } finally { - releaseConnection(null, ps, con); - } - - return count; - } - - @Override - public void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException { - - Connection dbConnection = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQuerySetGroupTaskRetryValue(dbConnection, - groupTaskId, value); - - prepStatement.executeUpdate(); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(null, prepStatement, dbConnection); - } - - } - - @Override - public TapeRecallTO takeoverTask() throws DataAccessException { - - return takeoverTask(null); - } - - @Override - public TapeRecallTO takeoverTask(String voName) throws DataAccessException { - - List taskList = takeoverTasksWithDoubles(1, voName); - - if (taskList.isEmpty()) { - return null; - } - return taskList.get(0); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException { - - return takeoverTasksWithDoubles(numberOfTaks, 
null); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - - List taskList = Lists.newLinkedList(); - TapeRecallTO task = null; - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks); - } else { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks, voName); - } - // start transaction - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - if (!res.first()) { - log.info("No tape recall rows ready for takeover"); - return taskList; - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - task.setStatus(TapeRecallStatus.IN_PROGRESS); - taskList.add(task); - } while (res.next()); - if (!taskList.isEmpty()) { - try { - prepStatement = sqlHelper.getQueryUpdateTasksStatus(dbConnection, - taskList, TapeRecallStatus.IN_PROGRESS.getStatusId(), - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, new Date()); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: " - + e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } - prepStatement.executeUpdate(); - } - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException { - - Connection dbConnection = getConnection(); - ResultSet res = null; - List taskList = Lists.newArrayList(); - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetAllTasksInProgress(dbConnection, - numberOfTaks); - - log.debug("getAllInProgressTasks query: {}", prepStatement); - - res = prepStatement.executeQuery(); - - boolean emptyResultSet = true; - - while (res.next()) { - - emptyResultSet = false; - TapeRecallTO task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } - - if (emptyResultSet) { - - log.debug("No in progress recall tasks found."); - } - - } catch (Exception e) { - - log.error("Error executing query: {}", prepStatement, e); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - - } finally { - - releaseConnection(res, prepStatement, dbConnection); - } - - return taskList; - } - - private void setTaskInfo(TapeRecallTO task, ResultSet res) - throws DataAccessException { - - if (res == null) { - throw new DataAccessException("Unable to build Task from NULL ResultSet"); - } - - String requestTokenStr = null; - Timestamp insertionInstant; - try { - requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); - insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); - - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve RequestToken String from 
ResultSet. " + e); - } - try { - task - .setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); - } catch (InvalidTRequestTokenAttributesException e) { - throw new DataAccessException( - "Unable to build TRequestToken from token='" + requestTokenStr + "'. " - + e); - } - - UUID groupTaskId = null; - String groupTaskIdStr = null; - try { - groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - if (groupTaskIdStr != null) { - try { - groupTaskId = UUID.fromString(groupTaskIdStr); - task.setGroupTaskId(groupTaskId); - } catch (IllegalArgumentException iae) { - throw new DataAccessException( - "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " - + iae); - } - } - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve GroupTaskId String from ResultSet. " + e); - } - - // do not set the task ID, it is produced by the setFilename call - - try { - - task.setRequestType(valueOf(res.getString(TapeRecallMySQLHelper.COL_REQUEST_TYPE))); - task.setFileName(res.getString(TapeRecallMySQLHelper.COL_FILE_NAME)); - task.setPinLifetime(res.getInt(TapeRecallMySQLHelper.COL_PIN_LIFETIME)); - task.setStatusId(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setVoName(res.getString(TapeRecallMySQLHelper.COL_VO_NAME)); - task.setUserID(res.getString(TapeRecallMySQLHelper.COL_USER_ID)); - task.setRetryAttempt(res.getInt(TapeRecallMySQLHelper.COL_RETRY_ATTEMPT)); - Calendar calendar = new GregorianCalendar(); - task.setDeferredRecallInstant(res.getTimestamp( - TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME, calendar)); - task.setInsertionInstant(res.getTimestamp(TapeRecallMySQLHelper.COL_DATE, - calendar)); - try { - task.forceStatusUpdateInstants(res.getTimestamp( - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), res - .getTimestamp(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } 
- } catch (SQLException e) { - throw new DataAccessException("Unable to getting info from ResultSet. " - + e); - } - } - - @Override - public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, - Date timestamp) throws DataAccessException { - - PreparedStatement prepStatement = null; - Connection dbConnection = getConnection(); - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - boolean ret = false; - int oldStatusId = -1; - - try { - - try { - prepStatement = sqlHelper.getQueryGetGroupTasks(dbConnection, - groupTaskId); - - log.debug("QUERY: {}", prepStatement); - // retrieves the tasks of this task group - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - // verify if their stored status is equal for all - oldStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - do { - int currentStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - if (currentStatusId != oldStatusId) { - log.warn("The tasks with groupTaskId {} have different statuses: {} from task {} differs " - + "from expected {}", groupTaskId, currentStatusId, - res.getString(TapeRecallMySQLHelper.COL_TASK_ID), oldStatusId); - break; - } - oldStatusId = currentStatusId; - } while (res.next()); - } catch (SQLException e) { - log - .error("Unable to retrieve groupTaskId related tasks. SQLException: {}", e); - throw new DataAccessException( - "Unable to retrieve groupTaskId related tasks. 
"); - } - if (oldStatusId != newStatusId) { - // update the task status and if is a valid transition set the relative - // transition timestamp - if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes( - newStatusId)) { - log - .warn("Requested the update of the status of a recall task group to status {} that is precedent " - + "to the recorded status performing the request the same...", newStatusId, oldStatusId); - } - String timestampColumn = null; - if (TapeRecallStatus.isFinalStatus(newStatusId)) { - timestampColumn = TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; - } else { - if (TapeRecallStatus.IN_PROGRESS.equals(TapeRecallStatus - .getRecallTaskStatus(newStatusId))) { - timestampColumn = TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; - } else { - log - .warn("unable to determine the status update timestamp column to use given the new statusId '{}'", newStatusId); - } - } - if (timestampColumn != null) { - try { - prepStatement = sqlHelper.getQueryUpdateGroupTaskStatus( - dbConnection, groupTaskId, newStatusId, timestampColumn, - timestamp); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: {}", e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - try { - prepStatement = sqlHelper.getQuerySetGroupTaskStatus(dbConnection, - groupTaskId, newStatusId); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } - try { - if (prepStatement.executeUpdate() > 0) { - ret = true; - } - commit(dbConnection); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - log - .warn("Skipping the status upadate operation, the status already stored is equal to the new one provided"); - } - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return ret; - } + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + if (voName == null) { + ps = sqlHelper.getQueryNumberInProgress(con); + } else { + ps = sqlHelper.getQueryNumberInProgress(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getNumberQueued() throws DataAccessException { + + return getNumberQueued(null); + } + + @Override + public int getNumberQueued(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryNumberQueued(con); + } else { + ps = sqlHelper.getQueryNumberQueued(con, voName); 
+ } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getReadyForTakeOver() throws DataAccessException { + + return getReadyForTakeOver(null); + } + + @Override + public int getReadyForTakeOver(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryReadyForTakeOver(con); + } else { + ps = sqlHelper.getQueryReadyForTakeOver(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public List getGroupTasks(UUID groupTaskId) throws DataAccessException { + + TapeRecallTO task = null; + List taskList = Lists.newArrayList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } while (res.next()); + } else { + log.info("No tasks with GroupTaskId='{}'", groupTaskId); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + 
PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + if (!response) { + log.info("No tasks found with GroupTaskId='{}'", groupTaskId); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public Optional getTask(UUID taskId, String requestToken) + throws DataAccessException { + + TapeRecallTO task = null; + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + task = new TapeRecallTO(); + setTaskInfo(task, res); + } else { + log.info("No task found for requestToken={} taskId={}. Query={}", requestToken, taskId, ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return Optional.ofNullable(task); + } + + @Override + public boolean existsTask(UUID taskId, String requestToken) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException { + + if (task.getTaskId() == null || task.getRequestToken() == null + || 
task.getRequestToken().getValue().trim().isEmpty()) { + log.error( + "received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", + task.getTaskId(), task.getRequestToken()); + throw new DataAccessException("Unable to create insert the task with the provided UUID and " + + "request token using UUID-namebased algorithm. TaskId = " + task.getTaskId() + + " , request token = " + task.getRequestToken()); + } + int status = task.getStatusId(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + + con = getConnection(); + + if (statuses == null || statuses.length == 0) { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId()); + } else { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId(), statuses); + } + log.debug("QUERY: {}", ps); + + res = ps.executeQuery(); + + if (res.first()) { + /* Take the first, but there can be more than one result */ + String uuidString = res.getString(COL_GROUP_TASK_ID); + status = res.getInt(COL_STATUS); + task.setStatusId(status); + task.setGroupTaskId(UUID.fromString(uuidString)); + Calendar calendar = new GregorianCalendar(); + try { + task.forceStatusUpdateInstants( + res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), + res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } else { + log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); + task.setGroupTaskId(proposedGroupTaskId); + task.setStatusId(status); + } + + ps = sqlHelper.getQueryInsertTask(con, task); + if (ps == null) { + // this case is possible if and only if the task is null or empty + log.error("Cannot create the query because the task is null or empty."); + throw new DataAccessException("Cannot create the query because the task is null or empty."); + } + 
log.debug("Query(insert-task)={}", ps); + int n = ps.executeUpdate(); + log.debug("Query(insert-task)={} exited with {}", ps, n); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return task.getGroupTaskId(); + } + + @Override + public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { + + PreparedStatement ps = null; + Connection con = null; + int count = 0; + boolean hasLimit = numTasks > 0; + + try { + + con = getConnection(); + if (hasLimit) { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); + } else { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); + } + + count = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + return count; + } + + @Override + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQuerySetGroupTaskRetryValue(con, groupTaskId, value); + ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + @Override + public TapeRecallTO takeoverTask() throws DataAccessException { + + return takeoverTask(null); + } + + @Override + public TapeRecallTO takeoverTask(String voName) throws DataAccessException { + + List taskList = takeoverTasksWithDoubles(1, voName); + + if (taskList.isEmpty()) { + return null; + } + return taskList.get(0); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException { + + return takeoverTasksWithDoubles(numberOfTaks, null); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException { + + List taskList = 
Lists.newLinkedList(); + TapeRecallTO task = null; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks); + } else { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks, voName); + } + + // start transaction + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + task.setStatus(TapeRecallStatus.IN_PROGRESS); + taskList.add(task); + } while (res.next()); + if (!taskList.isEmpty()) { + try { + ps = sqlHelper.getQueryUpdateTasksStatus(con, taskList, + TapeRecallStatus.IN_PROGRESS.getStatusId(), COL_IN_PROGRESS_DATE, new Date()); + } catch (IllegalArgumentException e) { + log.error( + "Unable to obtain the query to update task status and set status transition timestamp. IllegalArgumentException: " + + e.getMessage()); + throw new DataAccessException( + "Unable to obtain the query to update task status and set status transition timestamp"); + } + ps.executeUpdate(); + } + } else { + log.info("No tape recall rows ready for takeover"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException { + + List taskList = Lists.newArrayList(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetAllTasksInProgress(con, numberOfTaks); + + log.debug("getAllInProgressTasks query: {}", ps); + + res = ps.executeQuery(); + + boolean emptyResultSet = true; + + while (res.next()) { + + emptyResultSet = false; + TapeRecallTO task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } + + if (emptyResultSet) { + + 
log.debug("No in progress recall tasks found."); + } + + } catch (SQLException e) { + + e.printStackTrace(); + + } finally { + + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return taskList; + } + + private void setTaskInfo(TapeRecallTO task, ResultSet res) throws DataAccessException { + + if (res == null) { + throw new DataAccessException("Unable to build Task from NULL ResultSet"); + } + + String requestTokenStr = null; + Timestamp insertionInstant; + try { + requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); + insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); + + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve RequestToken String from ResultSet. " + e); + } + try { + task.setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); + } catch (InvalidTRequestTokenAttributesException e) { + throw new DataAccessException( + "Unable to build TRequestToken from token='" + requestTokenStr + "'. " + e); + } + + UUID groupTaskId = null; + String groupTaskIdStr = null; + try { + groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); + if (groupTaskIdStr != null) { + try { + groupTaskId = UUID.fromString(groupTaskIdStr); + task.setGroupTaskId(groupTaskId); + } catch (IllegalArgumentException iae) { + throw new DataAccessException( + "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " + iae); + } + } + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve GroupTaskId String from ResultSet. 
" + e); + } + + // do not set the task ID, it is produced by the setFilename call + + try { + + task.setRequestType(valueOf(res.getString(COL_REQUEST_TYPE))); + task.setFileName(res.getString(COL_FILE_NAME)); + task.setPinLifetime(res.getInt(COL_PIN_LIFETIME)); + task.setStatusId(res.getInt(COL_STATUS)); + task.setVoName(res.getString(COL_VO_NAME)); + task.setUserID(res.getString(COL_USER_ID)); + task.setRetryAttempt(res.getInt(COL_RETRY_ATTEMPT)); + Calendar calendar = new GregorianCalendar(); + task.setDeferredRecallInstant(res.getTimestamp(COL_DEFERRED_STARTTIME, calendar)); + task.setInsertionInstant(res.getTimestamp(COL_DATE, calendar)); + try { + task.forceStatusUpdateInstants(res.getTimestamp(COL_IN_PROGRESS_DATE, calendar), + res.getTimestamp(COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } catch (SQLException e) { + throw new DataAccessException("Unable to getting info from ResultSet. 
" + e); + } + } + + @Override + public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, Date timestamp) + throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + boolean ret = false; + int oldStatusId = -1; + + try { + con = getConnection(); + + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + + // retrieves the tasks of this task group + res = ps.executeQuery(); + + if (!res.first()) { + log.error("No tasks with GroupTaskId='{}'", groupTaskId); + throw new DataAccessException( + "No recall table row retrieved executing query: '" + ps + "'"); + } + + // verify if their stored status is equal for all + oldStatusId = res.getInt(COL_STATUS); + do { + int currentStatusId = res.getInt(COL_STATUS); + if (currentStatusId != oldStatusId) { + log.warn( + "The tasks with groupTaskId {} have different statuses: {} from task {} differs " + + "from expected {}", + groupTaskId, currentStatusId, res.getString(COL_TASK_ID), oldStatusId); + break; + } + oldStatusId = currentStatusId; + } while (res.next()); + + if (oldStatusId != newStatusId) { + // update the task status and if is a valid transition set the relative transition timestamp + if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes(newStatusId)) { + log.warn( + "Requested the update of the status of a recall task group to status {} that is precedent " + + "to the recorded status performing the request the same...", + newStatusId, oldStatusId); + } + String timestampColumn = null; + if (TapeRecallStatus.isFinalStatus(newStatusId)) { + timestampColumn = COL_FINAL_STATUS_DATE; + } else { + if (TapeRecallStatus.IN_PROGRESS + .equals(TapeRecallStatus.getRecallTaskStatus(newStatusId))) { + timestampColumn = COL_IN_PROGRESS_DATE; + } else { + log.warn( + "unable to determine the status update timestamp column to use given the new statusId '{}'", + newStatusId); + } + } + if (timestampColumn != null) { + ps 
= sqlHelper.getQueryUpdateGroupTaskStatus(con, groupTaskId, newStatusId, + timestampColumn, timestamp); + } else { + ps = sqlHelper.getQuerySetGroupTaskStatus(con, groupTaskId, newStatusId); + } + if (ps.executeUpdate() > 0) { + ret = true; + } + } else { + log.warn( + "Skipping the status upadate operation, the status already stored is equal to the new one provided"); + } + } catch (IllegalArgumentException | SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ret; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java new file mode 100644 index 000000000..f681a1330 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java @@ -0,0 +1,596 @@ +package it.grid.storm.persistence.impl.mysql; + + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.model.JiTData; +import it.grid.storm.persistence.pool.impl.StormDbConnectionPool; + +/** + * DAO class for VolatileAndJiTCatalog: it has been specifically designed for MySQL. 
+ * + * @author EGRID ICTP + * @version 1.0 (based on old PinnedFilesDAO) + * @date November, 2006 + */ +public class VolatileAndJiTDAOMySql extends AbstractDAO implements VolatileAndJiTDAO { + + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAOMySql.class); + + private static VolatileAndJiTDAO instance; + + public static synchronized VolatileAndJiTDAO getInstance() { + if (instance == null) { + instance = new VolatileAndJiTDAOMySql(); + } + return instance; + } + + private VolatileAndJiTDAOMySql() { + super(StormDbConnectionPool.getInstance()); + } + + /** + * Method that inserts a new entry in the JiT table of the DB, consisting of the specified + * filename, the local user uid, the local user gid, the acl, the start time as expressed by UNIX + * epoch (seconds since 00:00:00 1 1 1970) and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + */ + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime) { + + String sql = + "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, gid); + stmt.setInt(4, acl); + stmt.setLong(5, start); + stmt.setLong(6, pinLifetime); + log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! 
Error in addJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that inserts a new entry in the Volatile table of the DB, consisting of the specified + * filename, the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970), and the + * number of seconds the file must be kept for. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + */ + public void addVolatile(String filename, long start, long fileLifetime) { + + String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setLong(2, start); + stmt.setLong(3, fileLifetime); + log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in addVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Checks whether the given file exists in the volatile table or not. + * + * @param filename + * @return true if there is antry for the given file in the volatilte table, + * false otherwise. + */ + public boolean exists(String filename) { + + String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + boolean result; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + result = true; + } else { + result = false; + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! 
Error in existsOnVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + result = false; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return result; + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * This method _forces_ the update regardless of the fact that the new expiry lasts less than the + * current one! This method is intended to be used by expireJiT. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setLong(1, start); + stmt.setLong(2, pinLifetime); + stmt.setString(3, filename); + stmt.setInt(4, uid); + stmt.setInt(5, acl); + log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns the number of entries in the catalogue, matching the given filename, uid + * and acl. 
+ * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberJiT(String filename, int uid, int acl) { + + String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, acl); + log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that returns the number of Volatile entries in the catalogue, for the given filename. + * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberVolatile(String filename) { + + String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error( + "VolatileAndJiTDAO! Unexpected situation in numberVolatile: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! 
Error in numberVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that removes all entries in the JiT table of the DB, that match the specified filename. + * So this action takes place _regardless_ of the user that set up the ACL! + */ + public void removeAllJiTsOn(String filename) { + + String sql = "DELETE FROM jit WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method used to remove all expired entries, both of pinned files and of jit ACLs. Also, when + * removing volatile entries, any jit entry that refers to those expired volatiles will also be + * removed. + * + * The method requires a long representing the time measured as UNIX EPOCH upon which to base the + * purging: entries are evaluated expired when compared to this date. + * + * The method returns an array of two Collections; Collection[0] contains expired volatile entries + * String PFNs, while Collection[1] contains JiTDataTO objects. Collection[1] also contains those + * entries that may not have expired yet, but since the respective Volatile is being removed they + * too must be removed automatically. + * + * WARNING! If any error occurs it gets logged, and an array of two empty Collection is returned. + * This operation is treated as a Transaction by the DB, so a Roll Back should return everything + * to its original state! 
+ */ + public List removeExpired(long time) { + + List output = Lists.newArrayList(Lists.newArrayList(), Lists.newArrayList()); + + String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime volat = Lists.newArrayList(); + Collection volatid = Lists.newArrayList(); + while (rs.next()) { + volatid.add(Long.valueOf(rs.getLong("ID"))); + volat.add(rs.getString("file")); + } + int nvolat = volatid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // get list of jits + if (nvolat > 0) { + // there are expired volatile entries: adjust jit selection to include + // those SURLs too! + jit = jit + " OR file IN " + makeFileString(volat); + } + stmt = con.prepareStatement(jit); + stmt.setLong(1, time); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + rs = stmt.executeQuery(); + + Collection track = Lists.newArrayList(); + Collection trackid = Lists.newArrayList(); + + while (rs.next()) { + trackid.add(Long.valueOf(rs.getLong("ID"))); + JiTData aux = + new JiTData(rs.getString("file"), rs.getInt("acl"), rs.getInt("uid"), rs.getInt("gid")); + track.add(aux); + } + int njit = trackid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // remove entries + Collection volcol = Lists.newArrayList(); + Collection jitcol = Lists.newArrayList(); + try { + con.setAutoCommit(false); // begin transaction! + // delete volatile + int deletedvol = 0; + if (nvolat > 0) { + delvol = delvol + makeIDString(volatid); + stmt = con.prepareStatement(delvol); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedvol = stmt.executeUpdate(); + closeStatement(stmt); + } + // delete jits + int deletedjit = 0; + if (njit > 0) { + deljit = deljit + makeIDString(trackid); + stmt = con.prepareStatement(deljit); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedjit = stmt.executeUpdate(); + closeStatement(stmt); + } + con.commit(); + con.setAutoCommit(true); // end transaction! + log.debug("VolatileAndJiTDAO. 
Removed {} volatile catalogue entries " + + "and {} jit catalogue entries.", deletedvol, deletedjit); + volcol = volat; + jitcol = track; + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " + "rolling back! {}", + e.getMessage(), e); + con.rollback(); + closeStatement(stmt); + } + + // return collections + return Lists.newArrayList(volcol, jitcol); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired! {}", e.getMessage(), e); + // in case of any failure return an array of two empty Collection + return output; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * Entries get updated only if the new expiry calculated by adding start and pinLifetime, is + * larger than the existing one. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? AND acl=? 
AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { + + String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + List aux = Lists.newArrayList(); + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); + aux.add(rs.getLong("fileLifetime")); + } else { + log.debug("VolatileAndJiTDAO! infoOnVolatile did not find {}", filename); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in infoOnVolatile: {}", e.getMessage(), e); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return aux; + } + + /** + * Method that returns a String containing all Files. + */ + private String makeFileString(Collection files) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = files.iterator(); i.hasNext();) { + sb.append("'"); + sb.append(i.next()); + sb.append("'"); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all IDs. 
+ */ + private String makeIDString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(String.valueOf(i.next())); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java similarity index 88% rename from src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java rename to src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java index c0d8c428f..6aa0e6e1f 100644 --- a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java @@ -2,9 +2,11 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TTURL; diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java similarity index 94% rename from src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java rename to src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java index 9bbc05fe7..dd8e1bb27 100644 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java @@ -2,9 +2,12 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java rename to src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java index 796bfadf0..5621917a1 100644 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java @@ -2,11 +2,14 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
/**
 * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN).
 * SPDX-License-Identifier: Apache-2.0
 */
package it.grid.storm.persistence.model;

import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.namespace.model.Protocol;
import it.grid.storm.persistence.converter.StatusCodeConverter;
import it.grid.storm.persistence.converter.TransferProtocolListConverter;
import it.grid.storm.srm.types.TStatusCode;

import java.sql.Timestamp;
import java.util.List;

/**
 * Class that represents a row in the Persistence Layer: this is all raw data referring to the
 * BoLChunkData proper, that is, String and primitive types.
 *
 * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP
 * dirOption false status SRM_REQUEST_QUEUED
 *
 * All other fields are 0 if int, or a white space if String.
 *
 * NOTE(review): generic type parameters were stripped in the damaged source; List of String is
 * reconstructed here (TransferProtocolListConverter produces protocol name strings) -- verify.
 *
 * @author CNAF
 * @version 1.0
 * @date Aug 2009
 */
public class BoLChunkDataTO {

  /* Database table request_Bol fields BEGIN */
  private long primaryKey = -1; // ID primary key of record in DB
  private String fromSURL = " ";
  private boolean dirOption; // initialised in constructor
  private String normalizedStFN = null;
  private Integer surlUniqueID = null;
  /* Database table request_Get fields END */

  private String requestToken = " ";
  private int lifetime = 0;
  private boolean allLevelRecursive; // initialised in constructor
  private int numLevel; // initialised in constructor
  private List<String> protocolList = null; // initialised in constructor
  private long filesize = 0;
  private int status; // initialised in constructor
  private String errString = " ";
  private int deferredStartTime = -1;
  private Timestamp timeStamp = null;

  /** Builds a TO with the SRM 2.2 defaults described in the class javadoc. */
  public BoLChunkDataTO() {

    TURLPrefix protocolPreferences = new TURLPrefix();
    protocolPreferences.addProtocol(Protocol.GSIFTP);
    this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences);
    this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED);
    this.dirOption = false;
    this.allLevelRecursive = false;
    this.numLevel = 0;
  }

  public boolean getAllLevelRecursive() {

    return allLevelRecursive;
  }

  public int getDeferredStartTime() {

    return deferredStartTime;
  }

  public boolean getDirOption() {

    return dirOption;
  }

  public String getErrString() {

    return errString;
  }

  public long getFileSize() {

    return filesize;
  }

  public String getFromSURL() {

    return fromSURL;
  }

  public int getLifeTime() {

    return lifetime;
  }

  public int getNumLevel() {

    return numLevel;
  }

  public long getPrimaryKey() {

    return primaryKey;
  }

  public List<String> getProtocolList() {

    return protocolList;
  }

  public String getRequestToken() {

    return requestToken;
  }

  public Timestamp getTimeStamp() {

    return timeStamp;
  }

  public int getStatus() {

    return status;
  }

  public void setAllLevelRecursive(boolean b) {

    allLevelRecursive = b;
  }

  public void setDeferredStartTime(int deferredStartTime) {

    this.deferredStartTime = deferredStartTime;
  }

  public void setDirOption(boolean b) {

    dirOption = b;
  }

  public void setErrString(String s) {

    errString = s;
  }

  public void setFileSize(long n) {

    filesize = n;
  }

  public void setFromSURL(String s) {

    fromSURL = s;
  }

  public void setLifeTime(int n) {

    lifetime = n;
  }

  public void setNumLevel(int n) {

    numLevel = n;
  }

  public void setPrimaryKey(long n) {

    primaryKey = n;
  }

  /** Replaces the protocol list; null or empty input is ignored, keeping the default. */
  public void setProtocolList(List<String> l) {

    if ((l != null) && (!l.isEmpty())) {
      protocolList = l;
    }
  }

  public void setRequestToken(String s) {

    requestToken = s;
  }

  public void setTimeStamp(Timestamp timeStamp) {

    this.timeStamp = timeStamp;
  }

  public void setStatus(int n) {

    status = n;
  }

  /**
   * @param normalizedStFN the normalizedStFN to set
   */
  public void setNormalizedStFN(String normalizedStFN) {

    this.normalizedStFN = normalizedStFN;
  }

  /**
   * @return the normalizedStFN
   */
  public String normalizedStFN() {

    return normalizedStFN;
  }

  /**
   * @param surlUniqueID the sURLUniqueID to set
   */
  public void setSurlUniqueID(Integer surlUniqueID) {

    this.surlUniqueID = surlUniqueID;
  }

  /**
   * @return the sURLUniqueID
   * @deprecated historical misspelling kept for source compatibility; use
   *             {@link #surlUniqueID()} instead
   */
  @Deprecated
  public Integer sulrUniqueID() {

    return surlUniqueID;
  }

  /**
   * Correctly-spelled accessor for the SURL unique ID, added alongside the historical
   * {@link #sulrUniqueID()} (backward-compatible addition).
   *
   * @return the sURLUniqueID
   */
  public Integer surlUniqueID() {

    return surlUniqueID;
  }

  /** Space-separated dump of the row fields, in DB column order. */
  public String toString() {

    StringBuilder sb = new StringBuilder();
    sb.append(primaryKey);
    sb.append(" ");
    sb.append(requestToken);
    sb.append(" ");
    sb.append(fromSURL);
    sb.append(" ");
    sb.append(normalizedStFN);
    sb.append(" ");
    sb.append(surlUniqueID);
    sb.append(" ");
    sb.append(lifetime);
    sb.append(" ");
    sb.append(dirOption);
    sb.append(" ");
    sb.append(allLevelRecursive);
    sb.append(" ");
    sb.append(numLevel);
    sb.append(" ");
    sb.append(protocolList);
    sb.append(" ");
    sb.append(filesize);
    sb.append(" ");
    sb.append(status);
    sb.append(" ");
    sb.append(errString);
    return sb.toString();
  }
}
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidBoLDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidBoLPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/catalogs/ChunkData.java b/src/main/java/it/grid/storm/persistence/model/ChunkData.java similarity index 87% rename from src/main/java/it/grid/storm/catalogs/ChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ChunkData.java index 4aa2aa8f6..2c0f51a32 100644 --- a/src/main/java/it/grid/storm/catalogs/ChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ChunkData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; public interface ChunkData extends RequestData { diff --git a/src/main/java/it/grid/storm/catalogs/FileTransferData.java b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java similarity index 94% rename from src/main/java/it/grid/storm/catalogs/FileTransferData.java rename to src/main/java/it/grid/storm/persistence/model/FileTransferData.java index 352d6c52c..c0d9aba6e 100644 --- a/src/main/java/it/grid/storm/catalogs/FileTransferData.java +++ b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.srm.types.TTURL; diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java similarity index 84% rename from src/main/java/it/grid/storm/catalogs/IdentityPtGData.java rename to src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java index f740039d0..0a9199c1f 100644 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java @@ -2,10 +2,13 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java similarity index 84% rename from src/main/java/it/grid/storm/catalogs/IdentityPtPData.java rename to src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java index 75840ad74..7d8721c82 100644 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java @@ -5,10 +5,14 @@ /** * */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; 
import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; diff --git a/src/main/java/it/grid/storm/persistence/model/JiTData.java b/src/main/java/it/grid/storm/persistence/model/JiTData.java new file mode 100644 index 000000000..f7f3b7626 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/JiTData.java @@ -0,0 +1,58 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.model; + +/** + * Class that represents data associated to JiT entries. It contains a String representing the file, + * an int representing the ACL, an int representing the user UID, an int representing the user GID. + * + * @author EGRID - ICTP Trieste + * @version 1.0 + * @date November 2006 + */ +public class JiTData { + + private String file = ""; + private int uid = -1; + private int gid = -1; + private int acl = -1; + + /** + * Constructor requiring the complete name of the file as String, the acl as int, the uid and + * primary gid of the LocalUser bith as int. 
+ */ + public JiTData(String file, int acl, int uid, int gid) { + + this.file = file; + this.acl = acl; + this.uid = uid; + this.gid = gid; + } + + public String pfn() { + + return file; + } + + public int acl() { + + return acl; + } + + public int uid() { + + return uid; + } + + public int gid() { + + return gid; + } + + public String toString() { + + return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java similarity index 87% rename from src/main/java/it/grid/storm/catalogs/PersistentChunkData.java rename to src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java index 95ed83b9a..279d66e77 100644 --- a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; public interface PersistentChunkData extends ChunkData { diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java rename to src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java index d5dab7f02..02b57ce70 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java +++ b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java @@ -2,13 +2,15 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.common.types.TURLPrefix; import java.sql.Timestamp; import java.util.List; import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; /** * Class that represents a row in the Persistence Layer: this is all raw data diff --git a/src/main/java/it/grid/storm/catalogs/PtGData.java b/src/main/java/it/grid/storm/persistence/model/PtGData.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/PtGData.java rename to src/main/java/it/grid/storm/persistence/model/PtGData.java index 6e8518239..cf2dc7c36 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGData.java +++ b/src/main/java/it/grid/storm/persistence/model/PtGData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; diff --git a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java similarity index 93% rename from src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java rename to src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java index a6fe466ab..ff8953f20 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java @@ -2,10 +2,14 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java new file mode 100644 index 000000000..a198d24a1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java @@ -0,0 +1,314 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TStatusCode; + +import java.sql.Timestamp; +import java.util.List; + +public class PtPChunkDataTO { + + private static final String FQAN_SEPARATOR = "#"; + /* Database table request_Get fields BEGIN */ + private long primaryKey = -1; // ID primary key of status_Put record in DB + private String toSURL = " "; + private long expectedFileSize = 0; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int pinLifetime = -1; + private int fileLifetime = -1; + private String fileStorageType = null; // initialized in constructor + private String spaceToken = " "; + private List protocolList = null; // initialized in constructor + private String overwriteOption = null; // initialized in constructor + private int status; // initialized in constructor + private String errString = " "; + private String turl = " "; + private Timestamp timeStamp = null; + + private String clientDN = null; + private String vomsAttributes = null; + + + public PtPChunkDataTO() { + + this.fileStorageType = FileStorageTypeConverter.getInstance() + .toDB(TFileStorageType + .getTFileStorageType(StormConfiguration.getInstance().getDefaultFileStorageType())); + TURLPrefix protocolPreferences = new TURLPrefix(); + 
protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.overwriteOption = OverwriteModeConverter.toDB(TOverwriteMode.NEVER); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + } + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public Timestamp timeStamp() { + + return timeStamp; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp = timeStamp; + } + + public String toSURL() { + + return toSURL; + } + + public void setToSURL(String s) { + + toSURL = s; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the surlUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + /** + * @param surlUniqueID the surlUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + public int pinLifetime() { + + return pinLifetime; + } + + public void setPinLifetime(int n) { + + pinLifetime = n; + } + + public int fileLifetime() { + + return fileLifetime; + } + + public void setFileLifetime(int n) { + + fileLifetime = n; + } + + public String fileStorageType() { + + return fileStorageType; + } + + /** + * Method that sets the FileStorageType: if it is null nothing gets set. The deafult value is + * Permanent. 
+ */ + public void setFileStorageType(String s) { + + if (s != null) + fileStorageType = s; + } + + public String spaceToken() { + + return spaceToken; + } + + public void setSpaceToken(String s) { + + spaceToken = s; + } + + public long expectedFileSize() { + + return expectedFileSize; + } + + public void setExpectedFileSize(long l) { + + expectedFileSize = l; + } + + public List protocolList() { + + return protocolList; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) + protocolList = l; + } + + public String overwriteOption() { + + return overwriteOption; + } + + /** + * Method that sets the OverwriteMode: if it is null nothing gets set. The deafult value is Never. + */ + public void setOverwriteOption(String s) { + + if (s != null) + overwriteOption = s; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String transferURL() { + + return turl; + } + + public void setTransferURL(String s) { + + turl = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + vomsAttributes = s; + } + + public void setVomsAttributes(String[] fqaNsAsString) { + + vomsAttributes = ""; + for (int i = 0; i < fqaNsAsString.length; i++) { + vomsAttributes += fqaNsAsString[i]; + if (i < fqaNsAsString.length - 1) { + vomsAttributes += FQAN_SEPARATOR; + } + } + + } + + public String[] vomsAttributesArray() { + + return vomsAttributes.split(FQAN_SEPARATOR); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(toSURL); + sb.append(" "); + 
sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(pinLifetime); + sb.append(" "); + sb.append(fileLifetime); + sb.append(" "); + sb.append(fileStorageType); + sb.append(" "); + sb.append(spaceToken); + sb.append(" "); + sb.append(expectedFileSize); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(overwriteOption); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(turl); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/PtPData.java b/src/main/java/it/grid/storm/persistence/model/PtPData.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/PtPData.java rename to src/main/java/it/grid/storm/persistence/model/PtPData.java index 3a2cd1097..c9f907e19 100644 --- a/src/main/java/it/grid/storm/catalogs/PtPData.java +++ b/src/main/java/it/grid/storm/persistence/model/PtPData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; diff --git a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java similarity index 93% rename from src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java rename to src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java index b981f3aba..63ab8571f 100644 --- a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/PtPPersistentChunkData.java @@ -2,13 +2,17 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java index d7b3c0e82..996c92048 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkData.java @@ -2,8 +2,9 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java new file mode 100644 index 000000000..c4d36c1b1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java @@ -0,0 +1,117 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.srm.types.TStatusCode; + +/** + * Class that represents some of the fields in a row in the Persistence Layer: this is all raw data + * referring to the ReducedBoLChunkData proper, that is String and primitive types. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date November, 2006 + */ +public class ReducedBoLChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + private String errString = " "; + + public String errString() { + + return errString; + } + + public String fromSURL() { + + return fromSURL; + } + + public long primaryKey() { + + return primaryKey; + } + + public void setErrString(String s) { + + errString = s; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public void setStatus(int n) { + + status = n; + } + + public int status() { + + return status; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java similarity index 90% rename from 
src/main/java/it/grid/storm/catalogs/ReducedChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java index 79b04e759..93ef0a5d2 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java index 2ced971d1..498b05153 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java @@ -2,8 +2,9 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java rename to src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java index f3c6af0ff..a859c90e9 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java @@ -2,8 +2,9 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; +import it.grid.storm.persistence.converter.StatusCodeConverter; import it.grid.storm.srm.types.TStatusCode; /** diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java index 0006a86d1..75a7a59bb 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java @@ -2,8 +2,9 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java similarity index 93% rename from src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java rename to src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java index f6881c76b..35d71cfbc 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java @@ -2,9 +2,11 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; import it.grid.storm.srm.types.TFileStorageType; /** diff --git a/src/main/java/it/grid/storm/catalogs/RequestData.java b/src/main/java/it/grid/storm/persistence/model/RequestData.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/RequestData.java rename to src/main/java/it/grid/storm/persistence/model/RequestData.java index 4bae65abf..649c3fe15 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/RequestData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/RequestSummaryData.java rename to src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java index 2fac1c20c..0887523eb 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -10,6 +10,7 @@ import it.grid.storm.srm.types.TReturnStatus; // import it.grid.storm.griduser.VomsGridUser; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; /** * This class represents the SummaryData associated with the SRM request. It diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java similarity index 99% rename from src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java rename to src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java index 84f7adeb2..dcebfc38b 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import java.sql.Timestamp; diff --git a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java similarity index 97% rename from src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java index dec480599..76c1fd668 100644 --- a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java @@ -2,9 +2,10 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java similarity index 98% rename from src/main/java/it/grid/storm/catalogs/SurlRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SurlRequestData.java index 0ba2ed04e..4caa75077 100644 --- a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java @@ -2,10 +2,11 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
* SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import java.util.Map; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; diff --git a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java similarity index 88% rename from src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java index 0a351c216..25d06d839 100644 --- a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java new file mode 100644 index 000000000..5daaeb947 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnectionPool.java @@ -0,0 +1,21 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.pool; + +public interface DatabaseConnectionPool { + + public int getMaxTotal(); + + public int getInitialSize(); + + public int getMinIdle(); + + public long getMaxConnLifetimeMillis(); + + public boolean getTestOnBorrow(); + + public boolean getTestWhileIdle(); + +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java new file mode 100644 index 000000000..2f4842a15 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DatabaseConnector.java @@ -0,0 +1,20 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ + +package it.grid.storm.persistence.pool; + +public interface DatabaseConnector { + + public String getDbName(); + + public String getDriverName(); + + public String getDbUsername(); + + public String getDbPassword(); + + public String getDbURL(); + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/impl/DefaultDatabaseConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/impl/DefaultDatabaseConnectionPool.java new file mode 100644 index 000000000..d2e24cf0b --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/impl/DefaultDatabaseConnectionPool.java @@ -0,0 +1,104 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.pool.impl; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.metrics.InstrumentedBasicDataSource; +import it.grid.storm.metrics.StormMetricRegistry; +import it.grid.storm.persistence.pool.DatabaseConnectionPool; +import it.grid.storm.persistence.pool.DatabaseConnector; + +public class DefaultDatabaseConnectionPool implements DatabaseConnectionPool { + + private static final Logger log = LoggerFactory.getLogger(DefaultDatabaseConnectionPool.class); + + private DatabaseConnector dbs; + + private int maxTotal; + private int minIdle; + private int maxConnLifetimeMillis; + private boolean isTestOnBorrow; + private boolean isTestWhileIdle; + + private InstrumentedBasicDataSource bds; + + public DefaultDatabaseConnectionPool(DatabaseConnector dbs, int maxTotal, int minIdle, + int maxConnLifetimeMillis, boolean isTestOnBorrow, boolean isTestWhileIdle) { + + this.dbs = dbs; + this.maxTotal = maxTotal; + this.minIdle = minIdle; + this.maxConnLifetimeMillis = maxConnLifetimeMillis; + this.isTestOnBorrow = isTestOnBorrow; + this.isTestWhileIdle = isTestWhileIdle; + + init(); + } + + private void init() { + + bds = new InstrumentedBasicDataSource(dbs.getDbName(), + StormMetricRegistry.METRIC_REGISTRY.getRegistry()); + + bds.setDriverClassName(dbs.getDriverName()); + bds.setUrl(dbs.getDbURL()); + bds.setUsername(dbs.getDbUsername()); + bds.setPassword(dbs.getDbPassword()); + bds.setMaxTotal(maxTotal); + bds.setInitialSize(minIdle); + bds.setMinIdle(minIdle); + bds.setMaxConnLifetimeMillis(maxConnLifetimeMillis); + bds.setTestOnBorrow(isTestOnBorrow); + bds.setTestWhileIdle(isTestWhileIdle); + + log.info("Connecting to database '{}' as user '{}'", dbs.getDbName(), dbs.getDbUsername()); + log.debug("Database URL: {}", dbs.getDbURL()); + log.debug( + "Pool settings: [max-total: {}, min-idle: {}, 
max-conn-lifetime-millis: {}, test-on-borrow: {}, test-while-idle: {}]", + maxTotal, minIdle, maxConnLifetimeMillis, isTestOnBorrow, isTestWhileIdle); + + } + + public Connection getConnection() throws SQLException { + + return bds.getConnection(); + } + + @Override + public int getMaxTotal() { + return maxTotal; + } + + @Override + public int getInitialSize() { + return minIdle; + } + + @Override + public int getMinIdle() { + return minIdle; + } + + @Override + public long getMaxConnLifetimeMillis() { + return maxConnLifetimeMillis; + } + + @Override + public boolean getTestOnBorrow() { + return isTestOnBorrow; + } + + @Override + public boolean getTestWhileIdle() { + return isTestWhileIdle; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/impl/DefaultMySqlDatabaseConnector.java b/src/main/java/it/grid/storm/persistence/pool/impl/DefaultMySqlDatabaseConnector.java new file mode 100644 index 000000000..e7483bd04 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/impl/DefaultMySqlDatabaseConnector.java @@ -0,0 +1,73 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.pool.impl; + +import static java.lang.String.format; + +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.pool.DatabaseConnector; + +public class DefaultMySqlDatabaseConnector implements DatabaseConnector { + + private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver"; + + private final String name; + private final String url; + private final String username; + private final String password; + + private DefaultMySqlDatabaseConnector(String database) { + + this.name = database; + + StormConfiguration config = StormConfiguration.getInstance(); + + this.username = config.getDbUsername(); + this.password = config.getDbPassword(); + + String hostname = config.getDbHostname(); + int port = config.getDbPort(); + String properties = config.getDbProperties(); + + if (properties.isEmpty()) { + this.url = format("jdbc:mysql://%s:%d/%s", hostname, port, database); + } else { + this.url = format("jdbc:mysql://%s:%d/%s?%s", hostname, port, database, properties); + } + } + + @Override + public String getDriverName() { + return MYSQL_DRIVER; + } + + @Override + public String getDbURL() { + return url; + } + + @Override + public String getDbUsername() { + return username; + } + + @Override + public String getDbPassword() { + return password; + } + + public static DatabaseConnector getStormDbDatabaseConnector() { + return new DefaultMySqlDatabaseConnector("storm_db"); + } + + public static DatabaseConnector getStormBeIsamDatabaseConnector() { + return new DefaultMySqlDatabaseConnector("storm_be_ISAM"); + } + + @Override + public String getDbName() { + return name; + } +} diff --git a/src/main/java/it/grid/storm/persistence/pool/impl/StormBeIsamConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/impl/StormBeIsamConnectionPool.java new file mode 100644 index 000000000..df946db68 --- /dev/null +++ 
b/src/main/java/it/grid/storm/persistence/pool/impl/StormBeIsamConnectionPool.java @@ -0,0 +1,29 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). + * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.pool.impl; + +import it.grid.storm.config.StormConfiguration; + +public class StormBeIsamConnectionPool extends DefaultDatabaseConnectionPool { + + private static StormBeIsamConnectionPool instance; + + public static synchronized StormBeIsamConnectionPool getInstance() { + if (instance == null) { + instance = new StormBeIsamConnectionPool(); + } + return instance; + } + + private final static StormConfiguration c = StormConfiguration.getInstance(); + + private StormBeIsamConnectionPool() { + + super(DefaultMySqlDatabaseConnector.getStormBeIsamDatabaseConnector(), c.getStormBeIsamPoolSize(), + c.getStormBeIsamPoolMinIdle(), c.getDbPoolMaxWaitMillis(), c.isDbPoolTestOnBorrow(), + c.isDbPoolTestWhileIdle()); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/impl/StormDbConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/impl/StormDbConnectionPool.java new file mode 100644 index 000000000..8219fd21a --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/impl/StormDbConnectionPool.java @@ -0,0 +1,28 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.pool.impl; + +import it.grid.storm.config.StormConfiguration; + +public class StormDbConnectionPool extends DefaultDatabaseConnectionPool { + + private static StormDbConnectionPool instance; + + public static synchronized StormDbConnectionPool getInstance() { + if (instance == null) { + instance = new StormDbConnectionPool(); + } + return instance; + } + + private final static StormConfiguration c = StormConfiguration.getInstance(); + + private StormDbConnectionPool() { + + super(DefaultMySqlDatabaseConnector.getStormDbDatabaseConnector(), c.getStormDbPoolSize(), + c.getStormDbPoolMinIdle(), c.getDbPoolMaxWaitMillis(), c.isDbPoolTestOnBorrow(), + c.isDbPoolTestWhileIdle()); + } +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java deleted file mode 100644 index 2084fbcec..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnection implements DataSourceConnectionFactory - -{ - - private static final Logger log = LoggerFactory.getLogger(DBConnection.class); - private Connection connection = null; - private DataBaseStrategy db; - - public DBConnection(DataBaseStrategy db) throws PersistenceException { - - this.db = db; - - try { - Class.forName(db.getDriverName()).newInstance(); - } catch (Exception ex) { - log.error("Exception while getting JDBC driver: {}", ex.getMessage(), ex); - throw new PersistenceException("Driver loading problem", ex); - } - } - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - try { - result = getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws PersistenceException { - - if (connection != null) { - try { - shutdown(); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw new PersistenceException("Closing NON-Existing connection"); - } - } - - private Connection getConnection() throws SQLException { - - if (connection == null) { - String url = db.getConnectionString(); - connection = DriverManager.getConnection(url, db.getDbUsr(), - db.getDbPwd()); - } - return connection; - } - - private void shutdown() throws SQLException { - - connection.close(); // 
if there are no other open connection - connection = null; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java deleted file mode 100644 index 7d2069507..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.SQLException; - -import org.apache.commons.dbcp2.cpdsadapter.DriverAdapterCPDS; -import org.apache.commons.dbcp2.datasources.SharedPoolDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnectionPool implements DataSourceConnectionFactory { - - private static final Logger log = LoggerFactory - .getLogger(DBConnectionPool.class); - private DataBaseStrategy db; - private static SharedPoolDataSource sharedDatasource; - private static DBConnectionPool instance = new DBConnectionPool(); - private static long handle = -1; - - private DBConnectionPool() { - super(); - } - - public static DBConnectionPool getPoolInstance() { - if (handle == -1) { - return null; - } else { - return instance; - } - } - - public static void initPool(DataBaseStrategy db, int maxActive, int maxWait) - throws PersistenceException { - instance.init(db, maxActive, maxWait); - } - - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - if (handle == -1) { - 
throw new PersistenceException("Connection Pool is not initialized!"); - } - try { - result = sharedDatasource.getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws PersistenceException { - - if (con != null) { - try { - shutdown(con); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw new PersistenceException("Closing NON-Existing connection"); - } - } - - public String getPoolInfo() throws PersistenceException { - - String result = ""; - if (handle == -1) { - throw new PersistenceException("Connection Pool is not initialized!"); - } - if (sharedDatasource.getValidationQuery() != null) { - result += "Validation query = " + sharedDatasource.getValidationQuery() - + "\n"; - } - if (sharedDatasource.getDescription() != null) { - result += "Description = " + sharedDatasource.getDescription() + "\n"; - } - result += "Nr Connection Active = " + sharedDatasource.getNumActive() - + "\n"; - result += "Nr Connection Idle = " + sharedDatasource.getNumIdle() + "\n"; - result += "Nr Max Active Connection = " + sharedDatasource.getMaxTotal() - + "\n"; - - return result; - } - - private void init(DataBaseStrategy db, int maxActive, int maxWait) { - - instance.setDatabaseStrategy(db); - DriverAdapterCPDS connectionPoolDatasource = new DriverAdapterCPDS(); - try { - connectionPoolDatasource.setDriver(db.getDriverName()); - } catch (Exception ex) { - log.error("Exception while getting driver: {}", ex.getMessage(), ex); - } - - String connectionString = db.getConnectionString(); - connectionPoolDatasource.setUrl(connectionString); - log.debug("Database connection string: {}", connectionString); - connectionPoolDatasource.setUser(db.getDbUsr()); - connectionPoolDatasource.setPassword(db.getDbPwd()); - - sharedDatasource = new SharedPoolDataSource(); - sharedDatasource.setConnectionPoolDataSource(connectionPoolDatasource); - - 
sharedDatasource.setMaxTotal(maxActive); - sharedDatasource.setDefaultMaxWaitMillis(maxWait); - sharedDatasource.setValidationQuery("SELECT 1"); - sharedDatasource.setDefaultTestOnBorrow(true); - - handle = System.currentTimeMillis(); - } - - /** - * - * @throws SQLException - */ - private void shutdown(Connection conn) throws SQLException { - - conn.close(); - conn = null; - } - - public static void printInfo(DBConnectionPool pool) { - - try { - log.info("DATABASE POOL INFO: {}" , pool.getPoolInfo()); - } catch (PersistenceException ex2) { - log.error(ex2.getMessage(),ex2); - } - - } - - public DataBaseStrategy getDatabaseStrategy() { - - return db; - } - - private void setDatabaseStrategy(DataBaseStrategy db) { - - this.db = db; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java b/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java deleted file mode 100644 index 63e583fbe..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -public class DataBaseStrategy { - - private final String dbmsVendor; - private final String driverName; - private final String jdbcPrefix; - private String dbName; - private String dbPrefix; - private String dbHost; - private String dbUsr; - private String dbPwd; - private SQLFormat formatter; - private String properties; - - public DataBaseStrategy(String dbmsVendor, String driverName, String prefix, - SQLFormat formatter) { - - this.dbmsVendor = dbmsVendor; - this.driverName = driverName; - jdbcPrefix = prefix; - this.formatter = formatter; - this.properties = ""; - } - - - public String getDbmsVendor() { - return dbmsVendor; - } - - public String getDriverName() { - return driverName; - } - - public String getJdbcPrefix() { - - return jdbcPrefix; - } - - public void setDbUsr(String usrDb) { - - dbUsr = usrDb; - } - - public String getDbUsr() { - - return dbUsr; - } - - public void setDbPwd(String pwd) { - - dbPwd = pwd; - } - - public String getDbPwd() { - - return dbPwd; - } - - public void setDbName(String dbName) { - - this.dbName = dbName; - } - - public String getDbName() { - - return dbName; - } - - public void setDbPrefix(String dbName) { - - dbPrefix = dbName; - } - - public String getDbPrefix() { - - return dbPrefix; - } - - public void setDbHost(String host) { - - dbHost = host; - } - - public String getDbHost() { - - return dbHost; - } - - public String getConnectionString() { - - String connStr = jdbcPrefix + dbHost + "/" + dbName; - if (!properties.isEmpty()) { - connStr += "?" 
+ properties; - } - return connStr; - } - - public void setFormatter(SQLFormat formatter) { - - this.formatter = formatter; - } - - public SQLFormat getFormatter() { - - return formatter; - } - - public void setProperties(String encodedProperties) { - - this.properties = encodedProperties; - } - - @Override - public String toString() { - - return dbmsVendor; - } -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/Databases.java b/src/main/java/it/grid/storm/persistence/util/db/Databases.java deleted file mode 100644 index 677c1efcf..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/Databases.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -import java.util.Map; - -import com.google.common.collect.Maps; - -import it.grid.storm.config.Configuration; - -public class Databases { - - private static final Map DATABASES = Maps.newHashMap(); - - private static final String MYSQL_VENDOR = "mysql"; - private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver"; - private static final String MYSQL_PREFIX = "jdbc:mysql://"; - private static final SQLFormat MYSQL_FORMATTER = new MySqlFormat(); - - private static final String DB_NAME = "storm_be_ISAM"; - - static { - Configuration config = Configuration.getInstance(); - DataBaseStrategy dbs = new DataBaseStrategy(MYSQL_VENDOR, MYSQL_DRIVER, MYSQL_PREFIX, MYSQL_FORMATTER); - dbs.setDbUsr(config.getDBUserName()); - dbs.setDbPwd(config.getDBPassword()); - dbs.setProperties(config.getDBProperties()); - dbs.setDbName(DB_NAME); - dbs.setDbHost(config.getDBHostname()); - DATABASES.put(MYSQL_VENDOR, dbs); - } - - public static DataBaseStrategy getDataBaseStrategy(String vendor) { - - return DATABASES.get(vendor); -} -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java 
b/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java deleted file mode 100644 index 4748fd6da..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; - -public class InsertBuilder extends SQLBuilder { - - private String table; - private Map columnsAndData = new HashMap(); - - public void setTable(String table) { - - this.table = table; - } - - public String getTable() { - - return table; - } - - public String getCommand() { - - return "INSERT INTO "; - } - - public String getCriteria() { - - return ""; - } - - public String getWhat() { - - StringBuilder columns = new StringBuilder(); - StringBuilder values = new StringBuilder(); - StringBuilder what = new StringBuilder(); - - String columnName = null; - Iterator iter = columnsAndData.keySet().iterator(); - while (iter.hasNext()) { - columnName = iter.next(); - columns.append(columnName); - values.append(columnsAndData.get(columnName)); - if (iter.hasNext()) { - columns.append(','); - values.append(','); - } - } - - what.append(" ("); - what.append(columns); - what.append(") VALUES ("); - what.append(values); - what.append(") "); - return what.toString(); - - } - - public void addColumnAndData(String columnName, Object value) { - - if (value != null) { - columnsAndData.put(columnName, value); - } - } -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java b/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java deleted file mode 100644 index 3eee51b93..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -import java.text.SimpleDateFormat; - -public class MySqlFormat implements SQLFormat { - - private static final SimpleDateFormat dateFormat = new SimpleDateFormat( - "yyyy-MM-dd HH:mm:ss"); - - /** - * Create a string value of fields insertable into the query - * - * @param value - * Object - * @return String - */ - public String format(Object value) { - - if (value == null) { - return null; - } - Class clazz = value.getClass(); - if (Character.class.equals(clazz) || char.class.equals(clazz)) { - value = value.toString(); - } - if (value instanceof String) { - return value.toString(); - } - if (value instanceof java.util.Date) { - return dateFormat.format(value); - } - return value.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java b/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java deleted file mode 100644 index b436fcb63..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). - * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -public abstract class SQLBuilder { - - public SQLBuilder() { - - super(); - } - - public abstract String getCommand(); - - public abstract String getTable(); - - public abstract String getWhat(); - - public abstract String getCriteria(); - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java b/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java deleted file mode 100644 index b09d96652..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
- * SPDX-License-Identifier: Apache-2.0 - */ -package it.grid.storm.persistence.util.db; - -public abstract class SQLHelper { - - public String dbmsVendor; - private SQLFormat formatter; - - protected SQLHelper(String dbmsVendor) { - - this.dbmsVendor = dbmsVendor; - this.formatter = Databases.getDataBaseStrategy(dbmsVendor).getFormatter(); - } - - public String format(Object value) { - - return formatter.format(value); - } - - /** - * - * @param value - * boolean - * @return String - */ - public String format(boolean value) { - - String result = null; - Boolean boolValue = new Boolean(value); - result = formatter.format(boolValue); - return result; - } - - /** - * - * @param value - * int - * @return String - */ - public String format(int value) { - - String result = null; - Integer intValue = null; - try { - intValue = new Integer(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(intValue); - return result; - } - - /** - * - * @param value - * long - * @return String - */ - public String format(long value) { - - String result = null; - Long longValue = null; - try { - longValue = new Long(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(longValue); - return result; - } - - /** - * - * @param date - * Date - * @return String - */ - public String format(java.util.Date date) { - - return formatter.format(date); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java b/src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java new file mode 100644 index 000000000..20c90f043 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/util/helper/MySqlFormat.java @@ -0,0 +1,30 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.util.helper; + +import java.text.SimpleDateFormat; + +public class MySqlFormat implements SQLFormat { + + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + /** + * Create a string value of fields + * + * @param value Object + * @return String + */ + public String format(Object value) { + + if (value == null) { + return null; + } + if (value instanceof java.util.Date) { + return dateFormat.format(value); + } + return value.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java b/src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java similarity index 79% rename from src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java rename to src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java index 2d863e14a..852b88687 100644 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/SQLFormat.java @@ -2,7 +2,7 @@ * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). * SPDX-License-Identifier: Apache-2.0 */ -package it.grid.storm.persistence.util.db; +package it.grid.storm.persistence.util.helper; public interface SQLFormat { diff --git a/src/main/java/it/grid/storm/persistence/util/helper/SQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/SQLHelper.java new file mode 100644 index 000000000..a116ca1bb --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/util/helper/SQLHelper.java @@ -0,0 +1,36 @@ +/** + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
+ * SPDX-License-Identifier: Apache-2.0 + */ +package it.grid.storm.persistence.util.helper; + +public abstract class SQLHelper { + + private final SQLFormat formatter = new MySqlFormat(); + + public String format(Object value) { + + return formatter.format(value); + } + + public String format(boolean value) { + + return formatter.format(Boolean.valueOf(value)); + } + + public String format(int value) throws NumberFormatException { + + return formatter.format(Integer.valueOf(value)); + } + + public String format(long value) throws NumberFormatException { + + return formatter.format(Long.valueOf(value)); + } + + public String format(java.util.Date date) { + + return formatter.format(date); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java index b94815e11..fc635e73a 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java @@ -7,7 +7,6 @@ import it.grid.storm.common.types.VO; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.db.SQLHelper; import java.sql.Connection; import java.sql.PreparedStatement; @@ -18,787 +17,753 @@ import java.util.LinkedList; import java.util.List; +import com.google.common.collect.Lists; + public class StorageSpaceSQLHelper extends SQLHelper { - private final static String TABLE_NAME = "storage_space"; - private final static HashMap COLS = new HashMap(); - - private static final String[] COLUMN_NAMES = { "SS_ID", "USERDN", "VOGROUP", - "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", "FREE_SIZE", - "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", - "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", - "UPDATE_TIME" }; - - static { - 
COLS.put("storageSpaceId", "SS_ID"); - COLS.put("ownerName", "USERDN"); - COLS.put("ownerVO", "VOGROUP"); - COLS.put("alias", "ALIAS"); - COLS.put("token", "SPACE_TOKEN"); - COLS.put("created", "CREATED"); - COLS.put("spaceFile", "SPACE_FILE"); - COLS.put("storaqeInfo", "STORAGE_INFO"); - COLS.put("lifeTime", "LIFETIME"); - COLS.put("spaceType", "SPACE_TYPE"); - COLS.put("total_size", "TOTAL_SIZE"); - COLS.put("guar_size", "GUAR_SIZE"); - COLS.put("free_size", "FREE_SIZE"); - COLS.put("used_size", "USED_SIZE"); - COLS.put("busy_size", "BUSY_SIZE"); - COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); - COLS.put("available_size", "AVAILABLE_SIZE"); - COLS.put("reserved_size", "RESERVED_SIZE"); - COLS.put("update_time", "UPDATE_TIME"); - } - - /** - * CONSTRUCTOR - */ - public StorageSpaceSQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * - * @return String[] - */ - public String[] getColumnNames() { - - return COLUMN_NAMES; - } - - /** - * INSERT NEW ROW into TABLE - * - * @param ssTO - * StorageSpaceTO - * @return String - * @throws SQLException - */ - - public PreparedStatement insertQuery(Connection conn, StorageSpaceTO ssTO) - throws SQLException { - - List values = new LinkedList(); - - StringBuilder fields = new StringBuilder("("); - StringBuilder placeholders = new StringBuilder("("); - - if (ssTO != null) { - if (ssTO.getOwnerName() != null) { - fields.append(COLS.get("ownerName") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getOwnerName())); - } - - fields.append(COLS.get("ownerVO") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getVoName())); - - if (ssTO.getAlias() != null) { - fields.append(COLS.get("alias") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceToken() != null) { - fields.append(COLS.get("token") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceToken())); - } - if (ssTO.getCreated() != null) { - 
fields.append(COLS.get("created") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getCreated())); - } - if (ssTO.getSpaceFile() != null) { - fields.append(COLS.get("spaceFile") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - fields.append(COLS.get("storaqeInfo") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - fields.append(COLS.get("lifeTime") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - fields.append(COLS.get("spaceType") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - fields.append(COLS.get("total_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - fields.append(COLS.get("guar_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - fields.append(COLS.get("free_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - fields.append(COLS.get("used_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - fields.append(COLS.get("busy_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - fields.append(COLS.get("unavailable_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUnavailableSize())); - } - - if ((ssTO.getAvailableSize() != 0) || 
(ssTO.getAvailableSize() != -1)) { - fields.append(COLS.get("available_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - fields.append(COLS.get("reserved_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - fields.append(COLS.get("update_time").concat(",")); - placeholders.append("?,"); - values.add(format(ssTO.getUpdateTime())); - } - } - - fields.deleteCharAt(fields.length() - 1); - fields.append(")"); - placeholders.deleteCharAt(placeholders.length() - 1); - placeholders.append(")"); - - String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() - + " VALUES " + placeholders.toString(); - PreparedStatement preparedStatement = conn.prepareStatement(str); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * Create a StorageSpace Transfer Object coming from Result Set - * - * @param res - * ResultSet - * @return StorageSpaceTO - */ - public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { - - StorageSpaceTO ssTO = new StorageSpaceTO(); - - try { - ssTO.setStorageSpaceId(new Long(res.getLong("SS_ID"))); - - ssTO.setOwnerName(res.getString("USERDN")); - ssTO.setVoName(res.getString("VOGROUP")); - ssTO.setAlias(res.getString("ALIAS")); - ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); - - java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); - Date creationDate = new Date(createdTimeStamp.getTime()); - ssTO.setCreated(creationDate); - - ssTO.setSpaceFile(res.getString("SPACE_FILE")); - ssTO.setStorageInfo(res.getString("STORAGE_INFO")); - long tempLong = res.getLong("LIFETIME"); - if (!res.wasNull()) { - ssTO.setLifetime(tempLong); - } - - ssTO.setSpaceType(res.getString("SPACE_TYPE")); - - // Sizes - tempLong = res.getLong("TOTAL_SIZE"); - if 
(!res.wasNull()) { - ssTO.setTotalSize(tempLong); - } - tempLong = res.getLong("GUAR_SIZE"); - if (!res.wasNull()) { - ssTO.setGuaranteedSize(tempLong); - } - tempLong = res.getLong("RESERVED_SIZE"); - if (!res.wasNull()) { - ssTO.setReservedSize(tempLong); - } - tempLong = res.getLong("FREE_SIZE"); - if (!res.wasNull()) { - ssTO.setFreeSize(tempLong); - } - tempLong = res.getLong("AVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setAvailableSize(tempLong); - } - tempLong = res.getLong("USED_SIZE"); - if (!res.wasNull()) { - ssTO.setUsedSize(tempLong); - } - tempLong = res.getLong("BUSY_SIZE"); - if (!res.wasNull()) { - ssTO.setBusySize(tempLong); - } - tempLong = res.getLong("UNAVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setUnavailableSize(tempLong); - } - - // Last Update - java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); - Date updateDate = new Date(updatedTimeStamp.getTime()); - ssTO.setUpdateTime(updateDate); - - } catch (SQLException ex) { - ex.printStackTrace(); - } - - return ssTO; - } - - // ************ HELPER Method *************** // - - /** - * @param vo - * @return - */ - private String getVOName(String vo) { - - String voStr = VO.makeNoVo().getValue(); - if (vo != null && !vo.trim().equals("")) { - voStr = vo.trim(); - } - return voStr; - } - - /** - * - * - * @param token - * String - * @param conn - * @return String - * @throws SQLException - */ - public PreparedStatement selectByTokenQuery(Connection conn, String token) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where space_token=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, token); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. 
- * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. - * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasQuery(Connection conn, - GridUserInterface user, String spaceAlias) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - String dn = user.getDn(); - - if ((spaceAlias == null) || (spaceAlias.length() == 0)) { - str = "SELECT * FROM storage_space where userdn=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - } else { - str = "SELECT * FROM storage_space where userdn=? AND alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - preparedStatement.setString(2, spaceAlias); - } - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. - * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. - * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, - String spaceAlias) throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, spaceAlias); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'voname'. - * - * @param voname - * string - * @return String. 
- * @throws SQLException - */ - - public PreparedStatement selectBySpaceType(Connection conn, String voname) - throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where SPACE_TYPE=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, voname); - - return preparedStatement; - } - - /** - * This method return the SQL query to evaluate all expired space reservation - * requests. - * - * @param time - * Current time (in second) to compare to the reservationTime + - * lifetime - * @return String SQL query - * @throws SQLException - */ - public PreparedStatement selectExpiredQuery(Connection conn, - long currentTimeInSecond) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, currentTimeInSecond); - - return preparedStatement; - - } - - /** - * @param size - * @return - * @throws SQLException - */ - public PreparedStatement selectByUnavailableUsedSpaceSizeQuery( - Connection conn, long unavailableSizeValue) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("used_size") - + " IS NULL or " + COLS.get("used_size") + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, unavailableSizeValue); - - return preparedStatement; - } - - /** - * @param lastUpdateTimestamp - * @return - * @throws SQLException - */ - - public PreparedStatement selectByPreviousOrNullLastUpdateQuery( - Connection conn, long lastUpdateTimestamp) throws SQLException { 
- - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("update_time") - + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, lastUpdateTimestamp); - - return preparedStatement; - - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'userDN' and 'spaceToken'. - * - * @param user - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, - GridUserInterface user, String spaceToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, user.getDn()); - preparedStatement.setString(2, spaceToken); - - return preparedStatement; - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'spaceToken'. 
- * - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, spaceToken); - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByAliasAndTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add(format(getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" + " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" 
+ " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" + " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" + " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("alias") + " = ?" 
+ " and " + COLS.get("token") + " = ?"; - - values.add(format(ssTO.getAlias())); - values.add(format(ssTO.getSpaceToken())); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO and using SpaceToken as key - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add((getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" + " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getAlias() != null) { - query += " " + COLS.get("alias") + " = ?" + " ,"; - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" 
+ " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" + " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" + " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("token") + " = ?"; - - values.add(format(format(ssTO.getSpaceToken()))); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * - * @param token - * String - * @param freeSpace - * long - * @return String - * @throws SQLException - */ - public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, - String token, long freeSpace, Date updateTimestamp) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" - + " WHERE space_token=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, freeSpace); - preparedStatement.setString(2, format(updateTimestamp)); - preparedStatement.setString(3, token); - - return preparedStatement; - } - - public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToAdd) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " - + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, usedSpaceToAdd); - preparedStatement.setLong(2, usedSpaceToAdd); - preparedStatement.setLong(3, usedSpaceToAdd); - preparedStatement.setLong(4, usedSpaceToAdd); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToAdd); - - return preparedStatement; - - } - - public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToRemove) - throws SQLException { + private final static String TABLE_NAME = "storage_space"; + private final static HashMap COLS = new HashMap(); + + private static final String[] COLUMN_NAMES = + {"SS_ID", "USERDN", "VOGROUP", "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", + "FREE_SIZE", "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", + "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", "UPDATE_TIME"}; + + static { + COLS.put("storageSpaceId", "SS_ID"); + COLS.put("ownerName", "USERDN"); + COLS.put("ownerVO", "VOGROUP"); + COLS.put("alias", "ALIAS"); + COLS.put("token", "SPACE_TOKEN"); + COLS.put("created", "CREATED"); + COLS.put("spaceFile", "SPACE_FILE"); + COLS.put("storaqeInfo", "STORAGE_INFO"); + COLS.put("lifeTime", "LIFETIME"); + COLS.put("spaceType", "SPACE_TYPE"); + COLS.put("total_size", "TOTAL_SIZE"); + COLS.put("guar_size", "GUAR_SIZE"); + COLS.put("free_size", "FREE_SIZE"); + COLS.put("used_size", "USED_SIZE"); + COLS.put("busy_size", "BUSY_SIZE"); + COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); + COLS.put("available_size", "AVAILABLE_SIZE"); + COLS.put("reserved_size", "RESERVED_SIZE"); + COLS.put("update_time", "UPDATE_TIME"); + } + + /** + * + * @return String[] + */ + public String[] getColumnNames() { + + return COLUMN_NAMES; + } + + /** + * INSERT NEW ROW into TABLE + * + * @param ssTO StorageSpaceTO + * @return String + * @throws SQLException + */ + + public PreparedStatement insertQuery(Connection 
conn, StorageSpaceTO ssTO) throws SQLException { + + List values = Lists.newLinkedList(); + + StringBuilder fields = new StringBuilder("("); + StringBuilder placeholders = new StringBuilder("("); + + if (ssTO != null) { + if (ssTO.getOwnerName() != null) { + fields.append(COLS.get("ownerName") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getOwnerName())); + } + + fields.append(COLS.get("ownerVO") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getVoName())); + + if (ssTO.getAlias() != null) { + fields.append(COLS.get("alias") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceToken() != null) { + fields.append(COLS.get("token") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceToken())); + } + if (ssTO.getCreated() != null) { + fields.append(COLS.get("created") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + fields.append(COLS.get("spaceFile") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + fields.append(COLS.get("storaqeInfo") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + fields.append(COLS.get("lifeTime") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + fields.append(COLS.get("spaceType") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + fields.append(COLS.get("total_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + fields.append(COLS.get("guar_size") + (",")); + placeholders.append("?,"); + 
values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + fields.append(COLS.get("free_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + fields.append(COLS.get("used_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + fields.append(COLS.get("busy_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + fields.append(COLS.get("unavailable_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUnavailableSize())); + } + + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + fields.append(COLS.get("available_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + fields.append(COLS.get("reserved_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + fields.append(COLS.get("update_time").concat(",")); + placeholders.append("?,"); + values.add(format(ssTO.getUpdateTime())); + } + } - String str = null; - PreparedStatement preparedStatement = null; + fields.deleteCharAt(fields.length() - 1); + fields.append(")"); + placeholders.deleteCharAt(placeholders.length() - 1); + placeholders.append(")"); - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " - + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE - ? 
>= 0 "; + String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() + " VALUES " + + placeholders.toString(); + PreparedStatement preparedStatement = conn.prepareStatement(str); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Create a StorageSpace Transfer Object coming from Result Set + * + * @param res ResultSet + * @return StorageSpaceTO + */ + public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { + + StorageSpaceTO ssTO = new StorageSpaceTO(); + + try { + ssTO.setStorageSpaceId(Long.valueOf(res.getLong("SS_ID"))); + + ssTO.setOwnerName(res.getString("USERDN")); + ssTO.setVoName(res.getString("VOGROUP")); + ssTO.setAlias(res.getString("ALIAS")); + ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); + + java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); + Date creationDate = new Date(createdTimeStamp.getTime()); + ssTO.setCreated(creationDate); + + ssTO.setSpaceFile(res.getString("SPACE_FILE")); + ssTO.setStorageInfo(res.getString("STORAGE_INFO")); + long tempLong = res.getLong("LIFETIME"); + if (!res.wasNull()) { + ssTO.setLifetime(tempLong); + } + + ssTO.setSpaceType(res.getString("SPACE_TYPE")); + + // Sizes + tempLong = res.getLong("TOTAL_SIZE"); + if (!res.wasNull()) { + ssTO.setTotalSize(tempLong); + } + tempLong = res.getLong("GUAR_SIZE"); + if (!res.wasNull()) { + ssTO.setGuaranteedSize(tempLong); + } + tempLong = res.getLong("RESERVED_SIZE"); + if (!res.wasNull()) { + ssTO.setReservedSize(tempLong); + } + tempLong = res.getLong("FREE_SIZE"); + if (!res.wasNull()) { + ssTO.setFreeSize(tempLong); + } + tempLong = res.getLong("AVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setAvailableSize(tempLong); + } + tempLong = res.getLong("USED_SIZE"); + if (!res.wasNull()) { + ssTO.setUsedSize(tempLong); + } + tempLong = res.getLong("BUSY_SIZE"); + if (!res.wasNull()) { + ssTO.setBusySize(tempLong); + } + tempLong = 
res.getLong("UNAVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setUnavailableSize(tempLong); + } + + // Last Update + java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); + Date updateDate = new Date(updatedTimeStamp.getTime()); + ssTO.setUpdateTime(updateDate); + + } catch (SQLException ex) { + ex.printStackTrace(); + } + return ssTO; + } + + // ************ HELPER Method *************** // + + /** + * @param vo + * @return + */ + private String getVOName(String vo) { + + String voStr = VO.makeNoVo().getValue(); + if (vo != null && !vo.trim().equals("")) { + voStr = vo.trim(); + } + return voStr; + } + + /** + * + * + * @param token String + * @param conn + * @return String + * @throws SQLException + */ + public PreparedStatement selectByTokenQuery(Connection conn, String token) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where space_token=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, token); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasQuery(Connection conn, GridUserInterface user, + String spaceAlias) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + String dn = user.getDn(); + + if ((spaceAlias == null) || (spaceAlias.length() == 0)) { + str = "SELECT * FROM storage_space where userdn=?"; preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + } else { + str = "SELECT * FROM storage_space where userdn=? 
AND alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + preparedStatement.setString(2, spaceAlias); + } + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, String spaceAlias) + throws SQLException { + + /* + * This is to distinguish a client reseve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, spaceAlias); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'voname'. + * + * @param voname string + * @return String. + * @throws SQLException + */ + + public PreparedStatement selectBySpaceType(Connection conn, String voname) throws SQLException { + + /* + * This is to distinguish a client reseve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where SPACE_TYPE=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, voname); + + return preparedStatement; + } + + /** + * This method return the SQL query to evaluate all expired space reservation requests. 
+ * + * @param time Current time (in second) to compare to the reservationTime + lifetime + * @return String SQL query + * @throws SQLException + */ + public PreparedStatement selectExpiredQuery(Connection conn, long currentTimeInSecond) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = + "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, currentTimeInSecond); + + return preparedStatement; + + } + + /** + * @param size + * @return + * @throws SQLException + */ + public PreparedStatement selectByUnavailableUsedSpaceSizeQuery(Connection conn, + long unavailableSizeValue) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where " + COLS.get("used_size") + " IS NULL or " + + COLS.get("used_size") + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, unavailableSizeValue); + + return preparedStatement; + } + + /** + * @param lastUpdateTimestamp + * @return + * @throws SQLException + */ + + public PreparedStatement selectByPreviousOrNullLastUpdateQuery(Connection conn, + long lastUpdateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; - preparedStatement.setLong(1, usedSpaceToRemove); - preparedStatement.setLong(2, usedSpaceToRemove); - preparedStatement.setLong(3, usedSpaceToRemove); - preparedStatement.setLong(4, usedSpaceToRemove); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToRemove); + str = "SELECT * FROM storage_space where " + COLS.get("update_time") + + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; - return preparedStatement; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, lastUpdateTimestamp); + return preparedStatement; + + } 
+ + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'userDN' and 'spaceToken'. + * + * @param user + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, GridUserInterface user, + String spaceToken) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, user.getDn()); + preparedStatement.setString(2, spaceToken); + + return preparedStatement; + } + + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'spaceToken'. + * + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, spaceToken); + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByAliasAndTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" 
+ " ,"; + values.add(format(getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" 
+ " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" + " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("alias") + " = ?" + " and " + COLS.get("token") + " = ?"; + + values.add(format(ssTO.getAlias())); + values.add(format(ssTO.getSpaceToken())); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO and + * using SpaceToken as key + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" + " ,"; + values.add((getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getAlias() != null) { + query += " " + COLS.get("alias") + " = ?" + " ,"; + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" 
+ " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" + " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("token") + " = ?"; + + values.add(format(format(ssTO.getSpaceToken()))); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * + * @param token String + * @param freeSpace long + * @return String + * @throws SQLException + */ + public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, String token, + long freeSpace, Date updateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" + " WHERE space_token=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, freeSpace); + preparedStatement.setString(2, format(updateTimestamp)); + preparedStatement.setString(3, token); + + return preparedStatement; + } + + public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToAdd) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " + + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToAdd); + preparedStatement.setLong(2, usedSpaceToAdd); + preparedStatement.setLong(3, usedSpaceToAdd); + preparedStatement.setLong(4, usedSpaceToAdd); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToAdd); + + return preparedStatement; + + } + + public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToRemove) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " + + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE - ? >= 0 "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToRemove); + preparedStatement.setLong(2, usedSpaceToRemove); + preparedStatement.setLong(3, usedSpaceToRemove); + preparedStatement.setLong(4, usedSpaceToRemove); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToRemove); + + return preparedStatement; + + } } diff --git a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java index 5bb0fb9bc..df31fc5a9 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java @@ -5,7 +5,6 @@ package it.grid.storm.persistence.util.helper; import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.db.SQLHelper; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; import java.sql.Connection; @@ -15,562 +14,428 @@ import java.util.List; import java.util.UUID; + public class TapeRecallMySQLHelper extends SQLHelper { - 
private static final String TABLE_NAME = "tape_recall"; - - // primary key COL_TASK_ID + COL_REQUEST_TOKEN - public static final String COL_TASK_ID = "taskId"; - public static final String COL_REQUEST_TOKEN = "requestToken"; - public static final String COL_REQUEST_TYPE = "requestType"; - public static final String COL_FILE_NAME = "fileName"; - public static final String COL_PIN_LIFETIME = "pinLifetime"; - public static final String COL_STATUS = "status"; - public static final String COL_USER_ID = "userID"; - public static final String COL_VO_NAME = "voName"; - public static final String COL_DATE = "timeStamp"; - public static final String COL_RETRY_ATTEMPT = "retryAttempt"; - public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; - public static final String COL_GROUP_TASK_ID = "groupTaskId"; - public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; - public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; - - private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; - private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; - - static { - - QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " - + "LIMIT ?"; - - QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND) "; - } - - public TapeRecallMySQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * Verifies if the given string is the name of one of the timestamp columns - * - * @param columnName - * @return - */ - private static boolean validTimestampColumnName(String columnName) { - - return COL_DATE.equals(columnName) - || COL_IN_PROGRESS_DATE.equals(columnName) - || COL_FINAL_STATUS_DATE.equals(columnName); - } - - /** - * @param conn - * @param recallTask - * @return a PreparedStatement for the requested query - */ - public PreparedStatement getQueryInsertTask(Connection conn, - TapeRecallTO recallTask) { - - if (recallTask == null) { - return null; - } - - String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " - + COL_REQUEST_TOKEN + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME - + ", " + COL_PIN_LIFETIME + ", " + COL_STATUS + ", " + COL_VO_NAME + ", " - + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + COL_DEFERRED_STARTTIME - + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID - + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - try { - PreparedStatement prepStat = conn.prepareStatement(query); - - int idx = 1; - prepStat.setString(idx++, recallTask.getTaskId().toString()); - prepStat.setString(idx++, recallTask.getRequestToken().getValue()); - prepStat.setString(idx++, recallTask.getRequestType().name()); - prepStat.setString(idx++, recallTask.getFileName()); - prepStat.setInt(idx++, recallTask.getPinLifetime()); - prepStat.setInt(idx++, recallTask.getStatusId()); - - prepStat.setString(idx++, recallTask.getVoName()); - prepStat.setString(idx++, recallTask.getUserID()); - prepStat.setInt(idx++, recallTask.getRetryAttempt()); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getDeferredRecallInstant().getTime())); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getInsertionInstant().getTime())); - prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); - return prepStat; - - } catch 
(SQLException e) { - return null; - } - } - - /** - * @param taskId - * @param requestToken - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, - String requestToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" - + " AND " + COL_REQUEST_TOKEN + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - preparedStatement.setString(2, requestToken); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTasks(Connection conn, - UUID groupTaskId) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @param statuses - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, - UUID taskId, int[] statuses) throws 
SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + COL_STATUS - + " IN ( "; - - boolean first = true; - for (int status : statuses) { - if (first) { - first = false; - } else { - str += " , "; - } - str += status; - } - str += " )"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; + private static final String TABLE_NAME = "tape_recall"; + + // primary key COL_TASK_ID + COL_REQUEST_TOKEN + public static final String COL_TASK_ID = "taskId"; + public static final String COL_REQUEST_TOKEN = "requestToken"; + public static final String COL_REQUEST_TYPE = "requestType"; + public static final String COL_FILE_NAME = "fileName"; + public static final String COL_PIN_LIFETIME = "pinLifetime"; + public static final String COL_STATUS = "status"; + public static final String COL_USER_ID = "userID"; + public static final String COL_VO_NAME = "voName"; + public static final String COL_DATE = "timeStamp"; + public static final String COL_RETRY_ATTEMPT = "retryAttempt"; + public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; + public static final String COL_GROUP_TASK_ID = "groupTaskId"; + public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; + public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; + + private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; + private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; + + static { + + 
QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " + "LIMIT ?"; + + QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) "; + } + + /** + * Verifies if the given string is the name of one of the timestamp columns + */ + private static boolean validTimestampColumnName(String columnName) { + + return COL_DATE.equals(columnName) || COL_IN_PROGRESS_DATE.equals(columnName) + || COL_FINAL_STATUS_DATE.equals(columnName); + } + + public PreparedStatement getQueryInsertTask(Connection conn, TapeRecallTO recallTask) { + + if (recallTask == null) { + return null; + } + + String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " + COL_REQUEST_TOKEN + + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME + ", " + COL_PIN_LIFETIME + ", " + + COL_STATUS + ", " + COL_VO_NAME + ", " + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + + COL_DEFERRED_STARTTIME + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + try { + PreparedStatement prepStat = conn.prepareStatement(query); + + int idx = 1; + prepStat.setString(idx++, recallTask.getTaskId().toString()); + prepStat.setString(idx++, recallTask.getRequestToken().getValue()); + prepStat.setString(idx++, recallTask.getRequestType().name()); + prepStat.setString(idx++, recallTask.getFileName()); + prepStat.setInt(idx++, recallTask.getPinLifetime()); + prepStat.setInt(idx++, recallTask.getStatusId()); + + prepStat.setString(idx++, recallTask.getVoName()); + prepStat.setString(idx++, recallTask.getUserID()); + prepStat.setInt(idx++, recallTask.getRetryAttempt()); + prepStat.setTimestamp(idx++, + new java.sql.Timestamp(recallTask.getDeferredRecallInstant().getTime())); + prepStat.setTimestamp(idx++, + new 
java.sql.Timestamp(recallTask.getInsertionInstant().getTime())); + prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); + return prepStat; + + } catch (SQLException e) { + return null; + } + } + + public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, String requestToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + + COL_REQUEST_TOKEN + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, taskId.toString()); + preparedStatement.setString(2, requestToken); + + return preparedStatement; + } + + public PreparedStatement getQueryGetGroupTasks(Connection conn, UUID groupTaskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, groupTaskId.toString()); + + return preparedStatement; + } + + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, taskId.toString()); + + return preparedStatement; + } + + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId, int[] statuses) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" 
+ + " AND " + COL_STATUS + " IN ( "; + + boolean first = true; + for (int status : statuses) { + if (first) { + first = false; + } else { + str += " , "; + } + str += status; + } + str += " )"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, taskId.toString()); + + return preparedStatement; + } + + public PreparedStatement getQueryNumberQueued(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + public PreparedStatement getQueryNumberQueued(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + public PreparedStatement getQueryReadyForTakeOver(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" 
+ " AND " + COL_DEFERRED_STARTTIME + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + public PreparedStatement getQueryReadyForTakeOver(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" + " AND " + COL_DEFERRED_STARTTIME + + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + public PreparedStatement getQueryNumberInProgress(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + + return preparedStatement; + } + + public PreparedStatement getQueryNumberInProgress(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" 
+ " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" + " AND " + + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + } + + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks, + String voName) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + + "=?" + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + preparedStatement.setInt(3, numberOfTasks); + + return preparedStatement; + } + + /** + * Creates the query string for looking up all the information related to in progress tasks in the + * recall database. + */ + public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
+ " ORDER BY " + + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + + } + + public PreparedStatement getQueryUpdateTasksStatus(Connection conn, List taskList, + int statusId, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (taskList.size() == 0) { + return null; + } + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + for (int i = 1; i < taskList.size(); i++) { + str += " OR " + COL_GROUP_TASK_ID + "=?"; + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, statusId); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, taskList.get(0).getGroupTaskId().toString()); + + int idx = 4; + for (int i = 1; i < taskList.size(); i++) { + preparedStatement.setString(idx, taskList.get(i).getGroupTaskId().toString()); + idx++; + } + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. The priovided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + return preparedStatement; + } + + public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, UUID groupTaskId, + int status, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?" 
+ " AND " + COL_STATUS + "!=?"; + + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. The priovided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, groupTaskId.toString()); + preparedStatement.setInt(4, status); + + return preparedStatement; + + } + + public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, UUID groupTaskId, int status) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " + COL_GROUP_TASK_ID + + "=?" + " AND " + COL_STATUS + "!=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setString(2, groupTaskId.toString()); + preparedStatement.setInt(3, status); + + return preparedStatement; + } + + public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, UUID groupTaskId, + int value) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" 
+ " WHERE " + + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, value); + preparedStatement.setString(2, groupTaskId.toString()); + + return preparedStatement; + } + + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) + throws SQLException { + + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); + ps.setLong(1, expirationTime); - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; + return ps; + } - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, + int maxNumTasks) throws SQLException { - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn, String voName) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" 
+ " AND " + COL_DEFERRED_STARTTIME - + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" + " AND " - + COL_DEFERRED_STARTTIME + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" 
+ " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" - + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " - + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks, String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" - + " AND " + COL_VO_NAME + "=?" + " AND " + COL_DEFERRED_STARTTIME - + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - preparedStatement.setInt(3, numberOfTasks); - - return preparedStatement; - } - - /** - * Creates the query string for looking up all the information related to in - * progress tasks in the recall database. 
- * - * @param numberOfTasks - * the maximum number of task returned - * @return the query string - * @throws SQLException - */ - public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" - + " ORDER BY " + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - - } - - /** - * @param taskList - * @param date - * @param j - * @return - * @throws SQLException - */ - public PreparedStatement getQueryUpdateTasksStatus(Connection conn, - List taskList, int statusId, String timestampColumn, - Date timestamp) throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (taskList.size() == 0) { - return null; - } - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - for (int i = 1; i < taskList.size(); i++) { - str += " OR " + COL_GROUP_TASK_ID + "=?"; - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, statusId); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, taskList.get(0).getGroupTaskId() - .toString()); - - int idx = 4; - for (int i = 1; i < taskList.size(); i++) { - preparedStatement.setString(idx, taskList.get(i).getGroupTaskId() - .toString()); - idx++; - } - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. 
The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param status - * @param timestampColumn - * @param timestamp - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, - UUID groupTaskId, int status, String timestampColumn, Date timestamp) - throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?" - + " AND " + COL_STATUS + "!=?"; - - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, groupTaskId.toString()); - preparedStatement.setInt(4, status); - - return preparedStatement; - - } - - /** - * @param groupTaskId - * @param status - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, - UUID groupTaskId, int status) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " - + COL_GROUP_TASK_ID + "=?" 
+ " AND " + COL_STATUS + "!=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setString(2, groupTaskId.toString()); - preparedStatement.setInt(3, status); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param value - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, - UUID groupTaskId, int value) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" - + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, value); - preparedStatement.setString(2, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param con - * @param expirationTime - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) - throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); - ps.setLong(1, expirationTime); - - return ps; - } - - /** - * @param con - * @param expirationTime - * @param maxNumTasks - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, - int maxNumTasks) throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); - ps.setLong(1, expirationTime); - ps.setInt(2, maxNumTasks); + ps.setLong(1, expirationTime); + ps.setInt(2, maxNumTasks); - return ps; - } + return ps; + } } diff --git a/src/main/java/it/grid/storm/rest/RestServer.java 
b/src/main/java/it/grid/storm/rest/RestServer.java index ad3923651..c116e330d 100644 --- a/src/main/java/it/grid/storm/rest/RestServer.java +++ b/src/main/java/it/grid/storm/rest/RestServer.java @@ -34,11 +34,7 @@ import it.grid.storm.info.remote.resources.SpaceStatusResource; import it.grid.storm.metrics.NamedInstrumentedSelectChannelConnector; import it.grid.storm.metrics.NamedInstrumentedThreadPool; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_3; import it.grid.storm.namespace.remote.resource.VirtualFSResource; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_0; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_1; -import it.grid.storm.namespace.remote.resource.VirtualFSResourceCompat_1_2; import it.grid.storm.rest.auth.RestTokenFilter; import it.grid.storm.rest.metadata.Metadata; import it.grid.storm.tape.recalltable.providers.TapeRecallTOListMessageBodyWriter; @@ -46,17 +42,8 @@ import it.grid.storm.tape.recalltable.resources.TasksCardinality; import it.grid.storm.tape.recalltable.resources.TasksResource; -/** - * This class provides static methods for starting and stopping the storm-backend restful services. 
- * - * @author zappi - * @author valerioventuri - */ public class RestServer { - public static final int DEFAULT_MAX_THREAD_NUM = 100; - public static final int DEFAULT_MAX_QUEUE_SIZE = 1000; - private static final Logger LOG = LoggerFactory.getLogger(RestServer.class); private final Server server; @@ -106,10 +93,6 @@ private void configure() { resourceConfig.register(AuthorizationResource.class); resourceConfig.register(AuthorizationResourceCompat_1_0.class); resourceConfig.register(VirtualFSResource.class); - resourceConfig.register(VirtualFSResourceCompat_1_0.class); - resourceConfig.register(VirtualFSResourceCompat_1_1.class); - resourceConfig.register(VirtualFSResourceCompat_1_2.class); - resourceConfig.register(VirtualFSResourceCompat_1_3.class); resourceConfig.register(StormEAResource.class); resourceConfig.register(Metadata.class); resourceConfig.register(Ping.class); diff --git a/src/main/java/it/grid/storm/rest/metadata/Metadata.java b/src/main/java/it/grid/storm/rest/metadata/Metadata.java index 903ac27dc..8b9b7495a 100644 --- a/src/main/java/it/grid/storm/rest/metadata/Metadata.java +++ b/src/main/java/it/grid/storm/rest/metadata/Metadata.java @@ -9,17 +9,6 @@ import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static javax.ws.rs.core.Response.Status.NOT_FOUND; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.rest.metadata.model.StoriMetadata; -import it.grid.storm.rest.metadata.service.ResourceNotFoundException; -import it.grid.storm.rest.metadata.service.ResourceService; -import it.grid.storm.rest.metadata.service.StoriMetadataService; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import javax.ws.rs.GET; @@ -28,6 +17,16 @@ import javax.ws.rs.Produces; import javax.ws.rs.WebApplicationException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.rest.metadata.model.StoriMetadata; +import it.grid.storm.rest.metadata.service.ResourceNotFoundException; +import it.grid.storm.rest.metadata.service.ResourceService; +import it.grid.storm.rest.metadata.service.StoriMetadataService; + @Path("/metadata") public class Metadata { @@ -37,7 +36,7 @@ public class Metadata { public Metadata() throws NamespaceException { - NamespaceInterface namespace = NamespaceDirector.getNamespace(); + Namespace namespace = Namespace.getInstance(); metadataService = new StoriMetadataService( new ResourceService(namespace.getAllDefinedVFS(), namespace.getAllDefinedMappingRules())); } diff --git a/src/main/java/it/grid/storm/scheduler/ChunkScheduler.java b/src/main/java/it/grid/storm/scheduler/ChunkScheduler.java index 10ce38a03..c3f2b66d8 100644 --- a/src/main/java/it/grid/storm/scheduler/ChunkScheduler.java +++ b/src/main/java/it/grid/storm/scheduler/ChunkScheduler.java @@ -4,7 +4,7 @@ */ package it.grid.storm.scheduler; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,7 @@ public class ChunkScheduler implements Scheduler, Streets { private WorkerPool ptpWorkerPool; private WorkerPool bolWorkerPool; - private ChunkScheduler(Configuration configuration) { + private ChunkScheduler(StormConfiguration configuration) { int ptgWorkerCorePoolSize = configuration.getPtGCorePoolSize(); int ptgWorkerMaxPoolSize = configuration.getPtGMaxPoolSize(); @@ -81,7 +81,7 @@ private ChunkScheduler(Configuration configuration) { public static ChunkScheduler getInstance() { if (istance == null) { - istance = new ChunkScheduler(Configuration.getInstance()); + istance = new ChunkScheduler(StormConfiguration.getInstance()); } return istance; } diff --git a/src/main/java/it/grid/storm/scheduler/ChunkTask.java 
b/src/main/java/it/grid/storm/scheduler/ChunkTask.java index ae6def5c4..9ce4184c4 100644 --- a/src/main/java/it/grid/storm/scheduler/ChunkTask.java +++ b/src/main/java/it/grid/storm/scheduler/ChunkTask.java @@ -4,250 +4,222 @@ */ package it.grid.storm.scheduler; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.asynch.BoL; import it.grid.storm.asynch.PtG; import it.grid.storm.asynch.PtP; import it.grid.storm.asynch.Request; import it.grid.storm.asynch.RequestChunk; import it.grid.storm.health.BookKeeper; -import it.grid.storm.health.HealthDirector; +import it.grid.storm.health.HealthMonitor; import it.grid.storm.health.LogEvent; import it.grid.storm.health.OperationType; -import java.util.ArrayList; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *

- * Title: - *

- * - *

- * Description: - *

- * - *

- * Copyright: Copyright (c) 2005 - *

- * - *

- * Company: Project 'Grid.it' for INFN-CNAF, Bologna, Italy - *

- * - * @author Zappi Riccardo - * - * @author Michele Dibenedetto - * @version 1.1 - * - */ public class ChunkTask extends Task { - private static final Logger log = LoggerFactory - .getLogger(ChunkTask.class); - - private final Delegable todo; - private final String userDN; - private final String surl; - private final String requestToken; - - private boolean successResult = false; - - public ChunkTask(Delegable todo) { - - super(todo.getName()); - this.todo = todo; - if (todo instanceof Request) { - userDN = ((Request) todo).getUserDN(); - surl = ((Request) todo).getSURL(); - if (todo instanceof PersistentRequestChunk) { - requestToken = ((PersistentRequestChunk) todo).getRequestToken(); - } else { - requestToken = "Empty"; - } - } else { - userDN = "unknonw"; - surl = "unknonw"; - requestToken = "unknonw"; - } - } - - private boolean isAsynchTask() { - - return todo instanceof PersistentRequestChunk; - } - - private boolean isChunkTask() { - - return todo instanceof RequestChunk; - } - - public void setResult(boolean result) { - - this.successResult = result; - } - - /** - * Compares this object with the specified object for order. Note that this - * method is used by priority queue. - * - * @param o - * the Object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @todo Implement this java.lang.Comparable method. In this implementation - * all chunk tasks are considered equals. - * - */ - @Override - public int compareTo(Object o) { - - return 0; - } - - /** - * When an object implementing interface Runnable is used to - * create a thread, starting the thread causes the object's run - * method to be called in that separately executing thread. 
- */ - @Override - public void run() { - - runEvent(); - todo.doIt(); - endEvent(); - logExecution(); - } - - protected void endEvent() { - - super.endEvent(); - if (todo instanceof Request) { - successResult = ((Request) todo).isResultSuccess(); - } - if (isAsynchTask()) { - ((PersistentRequestChunk) todo).persistStatus(); - } - if (isChunkTask()) { - ((RequestChunk) todo).updateGlobalStatus(); - } - } - - /** - * Method used to book the execution of this chunk - */ - public void logExecution() { - - if (!isAsynchTask()) { - log.debug("logExecution disabled for synch chuncks"); - return; - } - - ArrayList bks = HealthDirector.getHealthMonitor() - .getBookKeepers(); - if (bks.isEmpty()) { - return; - } - LogEvent event = new LogEvent(buildOperationType(), userDN, surl, - getStartExecutionTime(), howlongInExecution(), - requestToken, successResult); - log.debug("Booking Asynch event {}", event); - for (int i = 0; i < bks.size(); i++) { - bks.get(i).addLogEvent(event); - } - } - - /** - * @return - */ - private OperationType buildOperationType() { - - if (todo instanceof PtP) { - return OperationType.PTP; - } - if (todo instanceof PtG) { - return OperationType.PTG; - } - if (todo instanceof BoL) { - return OperationType.BOL; - } - return OperationType.UNDEF; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + (isAsynchTask() ? 1231 : 1237); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - result = prime * result + (successResult ? 1231 : 1237); - result = prime * result + ((surl == null) ? 0 : surl.hashCode()); - result = prime * result + ((todo == null) ? 0 : todo.hashCode()); - result = prime * result + ((userDN == null) ? 
0 : userDN.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - ChunkTask other = (ChunkTask) obj; - if (isAsynchTask() != other.isAsynchTask()) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - if (successResult != other.successResult) { - return false; - } - if (surl == null) { - if (other.surl != null) { - return false; - } - } else if (!surl.equals(other.surl)) { - return false; - } - if (todo == null) { - if (other.todo != null) { - return false; - } - } else if (!todo.equals(other.todo)) { - return false; - } - if (userDN == null) { - if (other.userDN != null) { - return false; - } - } else if (!userDN.equals(other.userDN)) { - return false; - } - return true; - } + private static final Logger log = LoggerFactory.getLogger(ChunkTask.class); + + private final Delegable todo; + private final String userDN; + private final String surl; + private final String requestToken; + + private boolean successResult = false; + + public ChunkTask(Delegable todo) { + + super(todo.getName()); + this.todo = todo; + if (todo instanceof Request) { + userDN = ((Request) todo).getUserDN(); + surl = ((Request) todo).getSURL(); + if (todo instanceof PersistentRequestChunk) { + requestToken = ((PersistentRequestChunk) todo).getRequestToken(); + } else { + requestToken = "Empty"; + } + } else { + userDN = "unknonw"; + surl = "unknonw"; + requestToken = "unknonw"; + } + } + + private boolean isAsynchTask() { + + return todo instanceof PersistentRequestChunk; + } + + private boolean isChunkTask() { + + return todo instanceof RequestChunk; + } + + public void setResult(boolean result) { + + 
this.successResult = result; + } + + /** + * Compares this object with the specified object for order. Note that this method is used by + * priority queue. + * + * @param o the Object to be compared. + * @return a negative integer, zero, or a positive integer as this object is less than, equal to, + * or greater than the specified object. + * @todo Implement this java.lang.Comparable method. In this implementation all chunk tasks are + * considered equals. + * + */ + @Override + public int compareTo(Object o) { + + return 0; + } + + /** + * When an object implementing interface Runnable is used to create a thread, + * starting the thread causes the object's run method to be called in that separately + * executing thread. + */ + @Override + public void run() { + + runEvent(); + todo.doIt(); + endEvent(); + logExecution(); + } + + protected void endEvent() { + + super.endEvent(); + if (todo instanceof Request) { + successResult = ((Request) todo).isResultSuccess(); + } + if (isAsynchTask()) { + ((PersistentRequestChunk) todo).persistStatus(); + } + if (isChunkTask()) { + ((RequestChunk) todo).updateGlobalStatus(); + } + } + + /** + * Method used to book the execution of this chunk + */ + public void logExecution() { + + if (!isAsynchTask()) { + log.debug("logExecution disabled for synch chuncks"); + return; + } + + List bks = HealthMonitor.getInstance().getBookKeepers(); + if (bks.isEmpty()) { + return; + } + LogEvent event = new LogEvent(buildOperationType(), userDN, surl, getStartExecutionTime(), + howlongInExecution(), requestToken, successResult); + log.debug("Booking Asynch event {}", event); + for (int i = 0; i < bks.size(); i++) { + bks.get(i).addLogEvent(event); + } + } + + /** + * @return + */ + private OperationType buildOperationType() { + + if (todo instanceof PtP) { + return OperationType.PTP; + } + if (todo instanceof PtG) { + return OperationType.PTG; + } + if (todo instanceof BoL) { + return OperationType.BOL; + } + return OperationType.UNDEF; + } + + 
/* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + (isAsynchTask() ? 1231 : 1237); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + result = prime * result + (successResult ? 1231 : 1237); + result = prime * result + ((surl == null) ? 0 : surl.hashCode()); + result = prime * result + ((todo == null) ? 0 : todo.hashCode()); + result = prime * result + ((userDN == null) ? 0 : userDN.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ChunkTask other = (ChunkTask) obj; + if (isAsynchTask() != other.isAsynchTask()) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + if (successResult != other.successResult) { + return false; + } + if (surl == null) { + if (other.surl != null) { + return false; + } + } else if (!surl.equals(other.surl)) { + return false; + } + if (todo == null) { + if (other.todo != null) { + return false; + } + } else if (!todo.equals(other.todo)) { + return false; + } + if (userDN == null) { + if (other.userDN != null) { + return false; + } + } else if (!userDN.equals(other.userDN)) { + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/scheduler/CrusherScheduler.java b/src/main/java/it/grid/storm/scheduler/CrusherScheduler.java index b7faa52d8..0b740d7a3 100644 --- a/src/main/java/it/grid/storm/scheduler/CrusherScheduler.java +++ b/src/main/java/it/grid/storm/scheduler/CrusherScheduler.java @@ -4,7 +4,7 @@ */ package it.grid.storm.scheduler; -import 
it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +37,7 @@ public class CrusherScheduler implements Scheduler { private static CrusherScheduler istance = null; - private CrusherScheduler(Configuration configuration) { + private CrusherScheduler(StormConfiguration configuration) { workerCorePoolSize = configuration.getCorePoolSize(); workerMaxPoolSize = configuration.getMaxPoolSize(); @@ -51,7 +51,7 @@ public static CrusherScheduler getInstance() { log.trace("CrusherScheduler.getInstance"); if (istance == null) { - istance = new CrusherScheduler(Configuration.getInstance()); + istance = new CrusherScheduler(StormConfiguration.getInstance()); } return istance; } diff --git a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java index 096be8131..675c6d38b 100644 --- a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java +++ b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java @@ -12,72 +12,65 @@ public class SimpleSpaceUpdaterHelper implements SpaceUpdaterHelperInterface { - private static final Logger log = LoggerFactory - .getLogger(SimpleSpaceUpdaterHelper.class); - - private ReservedSpaceCatalog rsc; - - public SimpleSpaceUpdaterHelper() { - rsc = new ReservedSpaceCatalog(); - } - - private StorageSpaceData getStorageSpaceDataForVFS(VirtualFS vfs) { - - return rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); - } - - @Override - public boolean increaseUsedSpace(VirtualFS vfs, long size) { - - log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to add is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be increased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData 
ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.increaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } - - @Override - public boolean decreaseUsedSpace(VirtualFS vfs, long size) { - - log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to remove is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be decreased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } + private static final Logger log = LoggerFactory.getLogger(SimpleSpaceUpdaterHelper.class); + + + private StorageSpaceData getStorageSpaceDataForVFS(VirtualFS vfs) { + + return ReservedSpaceCatalog.getInstance() + .getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); + } + + @Override + public boolean increaseUsedSpace(VirtualFS vfs, long size) { + + log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to add is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be increased!", vfs.getAliasName()); + return true; + } + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return ReservedSpaceCatalog.getInstance() + .increaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } + + @Override + public boolean 
decreaseUsedSpace(VirtualFS vfs, long size) { + + log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to remove is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be decreased!", vfs.getAliasName()); + return true; + } + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return ReservedSpaceCatalog.getInstance() + .decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } } diff --git a/src/main/java/it/grid/storm/space/SpaceHelper.java b/src/main/java/it/grid/storm/space/SpaceHelper.java index ddbc40d6f..edb73f5c1 100644 --- a/src/main/java/it/grid/storm/space/SpaceHelper.java +++ b/src/main/java/it/grid/storm/space/SpaceHelper.java @@ -5,25 +5,19 @@ package it.grid.storm.space; import java.util.Iterator; -import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.InvalidRetrievedDataException; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; -import it.grid.storm.catalogs.MultipleDataEntriesException; -import it.grid.storm.catalogs.NoDataFoundException; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import 
it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.srm.types.ArrayOfTSpaceToken; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -34,365 +28,307 @@ public class SpaceHelper { - private static final int ADD_FREE_SPACE = 0; - private static final int REMOVE_FREE_SPACE = 1; - private Configuration config; - private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); - public static GridUserInterface storageAreaOwner = GridUserManager - .makeSAGridUser(); - - public SpaceHelper() { - - config = Configuration.getInstance(); - } - - public boolean isSAFull(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFS fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { - log.debug("AvailableSize={}" , spaceData.getAvailableSpaceSize().value()); - return true; - } else { - return false; - } - - } - - public long getSAFreeSpace(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFS fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null) { - return spaceData.getAvailableSpaceSize().value(); - } else { - return -1; - } - - } - - /** - * Verifies if the storage area to which the provided stori belongs has been - * initialized The verification is made on used space field - * - * @param log - * @param stori - * @return - */ - public boolean isSAInitialized(Logger log, StoRI stori) { - - log.debug("Checking if the 
Storage Area is initialized"); - if (stori == null) { - throw new IllegalArgumentException( - "Unable to perform the SA initialization check, provided null parameters: log : " - + log + " , stori : " + stori); - } - boolean response = false; - VirtualFS fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null && spaceData.getUsedSpaceSize() != null - && !spaceData.getUsedSpaceSize().isEmpty() - && spaceData.getUsedSpaceSize().value() >= 0) { - - response = true; - } - log.debug("The storage area is initialized with token alias {} is {} initialized" - , spaceData.getSpaceTokenAlias() , (response ? "" : "not")); - return response; - } - - /** - * - * @param log - * @param stori - * @return - */ - public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { - - log.debug("SpaceHelper: getting space token from StoRI"); - VirtualFS fs = stori.getVirtualFileSystem(); - return fs.getSpaceToken(); - - } - - /** - * Returns the spaceTokens associated to the 'user' AND 'spaceAlias'. If - * 'spaceAlias' is NULL or an empty string then this method returns all the - * space tokens this 'user' owns. - * - * @param user - * VomsGridUser user. - * @param spaceAlias - * User space token description. - */ - private Boolean isDefaultSpaceToken(TSpaceToken token) { - - Boolean found = false; - - config = Configuration.getInstance(); - List tokens = config.getListOfDefaultSpaceToken(); - for (int i = 0; i < tokens.size(); i++) { - if ((tokens.get(i)).toLowerCase().equals(token.getValue().toLowerCase())) { - found = true; - } - } - - return found; - } - - /** - * This method is used by the namespace parser component to insert a new Space - * Token Description data into the space catalog. 
In this way a standard Space - * Token is created, making it work for the GetSpaceMetaData request an - * SrmPreparateToPut with SpaceToken. - * - * The following code check if a SA_token with the same space description is - * already present into the catalog, if no data are found the new data are - * inserted, if yes the new data and the data already present are compared, - * and if needed an update operation is performed. - * - * The mandatory parameters are: - * - * @param spaceTokenAlias - * the space token description the user have to specify into the - * namespace.xml file - * @param totalOnLineSize - * the size the user have to specify into the namespace.xml file - * @param date - * @param spaceFileName - * the space file name will be used to get the free size. It is the - * StFNRoot. - */ - - public TSpaceToken createVOSA_Token(String spaceTokenAlias, - TSizeInBytes totalOnLineSize, String spaceFileName) { - - // TODO errors are not managed in this function - TSpaceToken spaceToken = null; - ArrayOfTSpaceToken tokenArray; - ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - - // Try with fake user, if it does not work remove it and use different - // method - - // First, check if the same VOSpaceArea already exists - tokenArray = spaceCatalog.getSpaceTokensByAlias(spaceTokenAlias); - - if (tokenArray == null || tokenArray.size() == 0) { - // the VOSpaceArea does not exist yet - SpaceHelper.log.debug("VoSpaceArea {} still does not exists. Start creation process." 
, spaceTokenAlias); - - PFN sfname = null; - try { - sfname = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e1) { - log.error("Error building PFN with {} : " , spaceFileName , e1); - } - - StorageSpaceData ssd = null; - - try { - ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, - spaceTokenAlias, totalOnLineSize, totalOnLineSize, - TLifeTimeInSeconds.makeInfinite(), null, null, sfname); - // ssd.setReservedSpaceSize(totalOnLineSize); - try { - ssd.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - ssd.setReservedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - - } catch (InvalidTSizeAttributesException e) { - // never thrown - log.error("Unexpected InvalidTSizeAttributesException: {}" - , e.getMessage(),e); - } - spaceToken = ssd.getSpaceToken(); - } catch (InvalidSpaceDataAttributesException e) { - log.error("Error building StorageSpaceData: " , e); - } - - try { - spaceCatalog.addStorageSpace(ssd); - } catch (DataAccessException e) { - log.error("Error storing StorageSpaceData on the DB: " , e); - } - // Track into global set to remove obsolete SA_token - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - /* - * the VOspaceArea already exists. Compare new data and data already - * present to check if the parameter has changed or not, and then perform - * update operation into catalog if it is needed. Only static information - * changes determine an update of the exeisting row - */ - SpaceHelper.log.debug("VOSpaceArea for space token description " - + spaceTokenAlias + " already present into DB."); - - boolean equal = false; - spaceToken = tokenArray.getTSpaceToken(0); - StorageSpaceData catalog_ssd = null; - try { - catalog_ssd = spaceCatalog.getStorageSpace(spaceToken); - } catch (TransferObjectDecodingException e) { - log - .error("Unable to build StorageSpaceData from StorageSpaceTO. 
TransferObjectDecodingException: {}" - , e.getMessage(),e); - } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: {}" - , e.getMessage(),e); - } - - if (catalog_ssd != null) { - - if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) - && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) - && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize - .value()) - && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { - equal = true; - } - - } - - // false otherwise - if (equal) { - // Do nothing if equals, everything are already present into - // the DB - SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date." - , spaceTokenAlias); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - // If the new data has been modified, update the data into the - // catalog - SpaceHelper.log.debug("VOSpaceArea for space token description {} is different in some parameters. Updating the catalog." - , spaceTokenAlias); - try { - catalog_ssd.setOwner(storageAreaOwner); - catalog_ssd.setTotalSpaceSize(totalOnLineSize); - catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); - - PFN sfn = null; - try { - sfn = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - catalog_ssd.setSpaceFileName(sfn); - - spaceCatalog.updateAllStorageSpace(catalog_ssd); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } catch (NoDataFoundException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (InvalidRetrievedDataException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (MultipleDataEntriesException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - - // Warning. 
CHeck if there are multiple token with same alisa, this - // is not allowed - if (tokenArray.size() > 1) { - SpaceHelper.log - .error("Error: multiple Space Token found for the same space Alias: {}. Only one has been evaluated!" - , spaceTokenAlias); - } - - } - return spaceToken; - - } - - /** - * This method should be use at the end of the namespace insert process - * (through the createVO_SA_token(...)) to remmove from the database the old - * VO_SA_token inserted from the previous namsespace.xml configuration - * - */ - public void purgeOldVOSA_token() { - - purgeOldVOSA_token(SpaceHelper.log); - } - - public void purgeOldVOSA_token(Logger log) { - - ReservedSpaceCatalog spacec = new ReservedSpaceCatalog(); - log.debug("VO SA: garbage collecting obsolete VOSA_token"); - - Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); - while (iter.hasNext()) { - log.debug("VO SA token REGISTRED: {}" , iter.next().getValue()); - } - - GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); - - // Remove obsolete space - ArrayOfTSpaceToken token_a = spacec.getSpaceTokens(stormServiceUser, null); - for (int i = 0; i < token_a.size(); i++) { - log.debug("VO SA token IN CATALOG: {}" , token_a.getTSpaceToken(i).getValue()); - } - - if ((token_a != null) && (token_a.size() > 0)) { - for (int i = 0; i < token_a.size(); i++) { - - if (!ReservedSpaceCatalog.getTokenSet().contains( - token_a.getTSpaceToken(i))) { - // This VOSA_token is no more used, removing it from persistence - TSpaceToken tokenToRemove = token_a.getTSpaceToken(i); - log.debug("VO SA token {} is no more used, removing it from persistence." , tokenToRemove); - spacec.release(stormServiceUser, tokenToRemove); - } - } - } else { - log - .warn("Space Catalog garbage SA_Token: no SA TOKENs specified. 
Please check your namespace.xml file."); - } - - ReservedSpaceCatalog.clearTokenSet(); - - } - - /** - * @param spaceData - * @return - */ - public static boolean isStorageArea(StorageSpaceData spaceData) - throws IllegalArgumentException { - - if (spaceData == null) { - log.error("Received null spaceData parameter"); - throw new IllegalArgumentException("Received null spaceData parameter"); - } - boolean result = false; - if (spaceData.getOwner() != null) { - result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); - } - return result; - } + private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); + public static GridUserInterface storageAreaOwner = GridUserManager.makeSAGridUser(); + + public boolean isSAFull(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFS fs = stori.getVirtualFileSystem(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { + log.debug("AvailableSize={}", spaceData.getAvailableSpaceSize().value()); + return true; + } else { + return false; + } + + } + + public long getSAFreeSpace(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFS fs = stori.getVirtualFileSystem(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if (spaceData != null) { + return spaceData.getAvailableSpaceSize().value(); + } else { + return -1; + } + + } + + /** + * Verifies if the storage area to which the provided stori belongs has been initialized The + * verification is made on used space field + * + * 
@param log + * @param stori + * @return + */ + public boolean isSAInitialized(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is initialized"); + if (stori == null) { + throw new IllegalArgumentException( + "Unable to perform the SA initialization check, provided null parameters: log : " + log + + " , stori : " + stori); + } + boolean response = false; + VirtualFS fs = stori.getVirtualFileSystem(); + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if (spaceData != null && spaceData.getUsedSpaceSize() != null + && !spaceData.getUsedSpaceSize().isEmpty() && spaceData.getUsedSpaceSize().value() >= 0) { + + response = true; + } + log.debug("The storage area is initialized with token alias {} is {} initialized", + spaceData.getSpaceTokenAlias(), (response ? "" : "not")); + return response; + } + + /** + * + * @param log + * @param stori + * @return + */ + public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { + + log.debug("SpaceHelper: getting space token from StoRI"); + VirtualFS fs = stori.getVirtualFileSystem(); + return fs.getSpaceToken(); + + } + + /** + * This method is used by the namespace parser component to insert a new Space Token Description + * data into the space catalog. In this way a standard Space Token is created, making it work for + * the GetSpaceMetaData request an SrmPreparateToPut with SpaceToken. + * + * The following code check if a SA_token with the same space description is already present into + * the catalog, if no data are found the new data are inserted, if yes the new data and the data + * already present are compared, and if needed an update operation is performed. 
+ * + * The mandatory parameters are: + * + * @param spaceTokenAlias the space token description the user have to specify into the + * namespace.xml file + * @param totalOnLineSize the size the user have to specify into the namespace.xml file + * @param date + * @param spaceFileName the space file name will be used to get the free size. It is the StFNRoot. + */ + + public TSpaceToken createVOSA_Token(String spaceTokenAlias, TSizeInBytes totalOnLineSize, + String spaceFileName) { + + // TODO errors are not managed in this function + TSpaceToken spaceToken = null; + ArrayOfTSpaceToken tokenArray; + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + // Try with fake user, if it does not work remove it and use different + // method + + // First, check if the same VOSpaceArea already exists + tokenArray = catalog.getSpaceTokensByAlias(spaceTokenAlias); + + if (tokenArray == null || tokenArray.size() == 0) { + // the VOSpaceArea does not exist yet + SpaceHelper.log.debug("VoSpaceArea {} still does not exists. 
Start creation process.", + spaceTokenAlias); + + PFN sfname = null; + try { + sfname = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e1) { + log.error("Error building PFN with {} : ", spaceFileName, e1); + } + + StorageSpaceData ssd = null; + + try { + ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, spaceTokenAlias, + totalOnLineSize, totalOnLineSize, TLifeTimeInSeconds.makeInfinite(), null, null, + sfname); + // ssd.setReservedSpaceSize(totalOnLineSize); + try { + ssd.setUnavailableSpaceSize(TSizeInBytes.make(0)); + ssd.setReservedSpaceSize(TSizeInBytes.make(0)); + + } catch (InvalidTSizeAttributesException e) { + // never thrown + log.error("Unexpected InvalidTSizeAttributesException: {}", e.getMessage(), e); + } + spaceToken = ssd.getSpaceToken(); + } catch (InvalidSpaceDataAttributesException e) { + log.error("Error building StorageSpaceData: ", e); + } + + try { + catalog.addStorageSpace(ssd); + } catch (DataAccessException e) { + log.error("Error storing StorageSpaceData on the DB: ", e); + } + // Track into global set to remove obsolete SA_token + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + /* + * the VOspaceArea already exists. Compare new data and data already present to check if the + * parameter has changed or not, and then perform update operation into catalog if it is + * needed. Only static information changes determine an update of the existing row + */ + SpaceHelper.log.debug("VOSpaceArea for space token description " + spaceTokenAlias + + " already present into DB."); + + boolean equal = false; + spaceToken = tokenArray.getTSpaceToken(0); + StorageSpaceData catalog_ssd = null; + try { + catalog_ssd = catalog.getStorageSpace(spaceToken); + } catch (TransferObjectDecodingException e) { + log.error( + "Unable to build StorageSpaceData from StorageSpaceTO. 
TransferObjectDecodingException: {}", + e.getMessage(), e); + } catch (DataAccessException e) { + log.error("Unable to build get StorageSpaceTO. DataAccessException: {}", e.getMessage(), e); + } + + if (catalog_ssd != null) { + + if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) + && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) + && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize.value()) + && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { + equal = true; + } + + } + + // false otherwise + if (equal) { + // Do nothing if equals, everything are already present into the DB + SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date.", + spaceTokenAlias); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + + // If the new data has been modified, update the data into the catalog + SpaceHelper.log.debug( + "VOSpaceArea for space token description {} is different in some parameters. Updating the catalog.", + spaceTokenAlias); + catalog_ssd.setOwner(storageAreaOwner); + catalog_ssd.setTotalSpaceSize(totalOnLineSize); + catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); + + PFN sfn = null; + try { + sfn = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + catalog_ssd.setSpaceFileName(sfn); + + catalog.updateAllStorageSpace(catalog_ssd); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + } + + // Warning. CHeck if there are multiple token with same alisa, this + // is not allowed + if (tokenArray.size() > 1) { + SpaceHelper.log.error( + "Error: multiple Space Token found for the same space Alias: {}. 
Only one has been evaluated!", + spaceTokenAlias); + } + + } + return spaceToken; + + } + + /** + * This method should be use at the end of the namespace insert process (through the + * createVO_SA_token(...)) to remove from the database the old VO_SA_token inserted from the + * previous namsespace.xml configuration + * + */ + public void purgeOldVOSA_token() { + + purgeOldVOSA_token(SpaceHelper.log); + } + + public void purgeOldVOSA_token(Logger log) { + + ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + log.debug("VO SA: garbage collecting obsolete VOSA_token"); + + Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); + while (iter.hasNext()) { + log.debug("VO SA token REGISTRED: {}", iter.next().getValue()); + } + + GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); + + // Remove obsolete space + ArrayOfTSpaceToken token_a = catalog.getSpaceTokens(stormServiceUser, null); + for (int i = 0; i < token_a.size(); i++) { + log.debug("VO SA token IN CATALOG: {}", token_a.getTSpaceToken(i).getValue()); + } + + if ((token_a != null) && (token_a.size() > 0)) { + for (int i = 0; i < token_a.size(); i++) { + + if (!ReservedSpaceCatalog.getTokenSet().contains(token_a.getTSpaceToken(i))) { + // This VOSA_token is no more used, removing it from persistence + TSpaceToken tokenToRemove = token_a.getTSpaceToken(i); + log.debug("VO SA token {} is no more used, removing it from persistence.", + tokenToRemove); + catalog.release(stormServiceUser, tokenToRemove); + } + } + } else { + log.warn( + "Space Catalog garbage SA_Token: no SA TOKENs specified. 
Please check your namespace.xml file."); + } + + ReservedSpaceCatalog.clearTokenSet(); + + } + + /** + * @param spaceData + * @return + */ + public static boolean isStorageArea(StorageSpaceData spaceData) throws IllegalArgumentException { + + if (spaceData == null) { + log.error("Received null spaceData parameter"); + throw new IllegalArgumentException("Received null spaceData parameter"); + } + boolean result = false; + if (spaceData.getOwner() != null) { + result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); + } + return result; + } } diff --git a/src/main/java/it/grid/storm/space/StorageSpaceData.java b/src/main/java/it/grid/storm/space/StorageSpaceData.java index aba06f6b2..0c3fcd981 100644 --- a/src/main/java/it/grid/storm/space/StorageSpaceData.java +++ b/src/main/java/it/grid/storm/space/StorageSpaceData.java @@ -20,7 +20,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; @@ -28,6 +27,7 @@ import it.grid.storm.common.types.VO; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.StorageSpaceTO; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; @@ -218,7 +218,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.totalSpaceSize = TSizeInBytes.makeEmpty(); if (ssTO.getTotalSize() >= 0) { try { - this.totalSpaceSize = TSizeInBytes.make(ssTO.getTotalSize(), SizeUnit.BYTES); + this.totalSpaceSize = TSizeInBytes.make(ssTO.getTotalSize()); log.trace("StorageSpaceData - TotalSize (desired): {}", this.totalSpaceSize); } catch (InvalidTSizeAttributesException ex1) { log.error("Error 
while constructing TotalSize (desired)", ex1); @@ -232,7 +232,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.setTotalGuaranteedSize(TSizeInBytes.makeEmpty()); if (ssTO.getGuaranteedSize() >= 0) { try { - this.totalGuaranteedSize = TSizeInBytes.make(ssTO.getGuaranteedSize(), SizeUnit.BYTES); + this.totalGuaranteedSize = TSizeInBytes.make(ssTO.getGuaranteedSize()); log.trace("StorageSpaceData - TotalSize (guaranteed): {}", this.totalGuaranteedSize); } catch (InvalidTSizeAttributesException ex2) { log.error("Error while constructing SpaceGuaranteed", ex2); @@ -247,7 +247,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.forceAvailableSpaceSize(TSizeInBytes.makeEmpty()); if (ssTO.getAvailableSize() >= 0) { try { - this.forceAvailableSpaceSize(TSizeInBytes.make(ssTO.getAvailableSize(), SizeUnit.BYTES)); + this.forceAvailableSpaceSize(TSizeInBytes.make(ssTO.getAvailableSize())); log.trace("StorageSpaceData - AVAILABLE size : {}", this.getAvailableSpaceSize()); } catch (InvalidTSizeAttributesException ex3) { log.error("Error while constructing AvailableSpaceSize", ex3); @@ -263,7 +263,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { if (ssTO.getFreeSize() >= 0) { try { - this.forceFreeSpaceSize(TSizeInBytes.make(ssTO.getFreeSize(), SizeUnit.BYTES)); + this.forceFreeSpaceSize(TSizeInBytes.make(ssTO.getFreeSize())); log.trace("StorageSpaceData - FREE (= available + unavailable) size : {}", this.getFreeSpaceSize()); } catch (InvalidTSizeAttributesException ex3) { @@ -279,7 +279,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.usedSpaceSize = TSizeInBytes.makeEmpty(); if (ssTO.getUsedSize() >= 0) { try { - this.usedSpaceSize = TSizeInBytes.make(ssTO.getUsedSize(), SizeUnit.BYTES); + this.usedSpaceSize = TSizeInBytes.make(ssTO.getUsedSize()); log.trace("StorageSpaceData - USED size: {}", this.usedSpaceSize); } catch (InvalidTSizeAttributesException ex3) { log.error("Error while constructing UsedSpaceSize", ex3); @@ -294,7 +294,7 @@ public 
StorageSpaceData(StorageSpaceTO ssTO) { this.forceBusySpaceSize(TSizeInBytes.makeEmpty()); if (ssTO.getBusySize() >= 0) { try { - this.forceBusySpaceSize(TSizeInBytes.make(ssTO.getBusySize(), SizeUnit.BYTES)); + this.forceBusySpaceSize(TSizeInBytes.make(ssTO.getBusySize())); log.trace("StorageSpaceData - BUSY (= used + reserved + unavailable) size: {}", this.getBusySpaceSize()); } catch (InvalidTSizeAttributesException ex3) { @@ -311,7 +311,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.unavailableSpaceSize = TSizeInBytes.makeEmpty(); if (ssTO.getUnavailableSize() >= 0) { try { - this.unavailableSpaceSize = TSizeInBytes.make(ssTO.getUnavailableSize(), SizeUnit.BYTES); + this.unavailableSpaceSize = TSizeInBytes.make(ssTO.getUnavailableSize()); log.trace("StorageSpaceData - UNAVAILABLE size: {}", this.unavailableSpaceSize); } catch (InvalidTSizeAttributesException ex3) { log.error("Error while constructing UnavailableSpaceSize", ex3); @@ -325,7 +325,7 @@ public StorageSpaceData(StorageSpaceTO ssTO) { this.reservedSpaceSize = TSizeInBytes.makeEmpty(); if (ssTO.getReservedSize() >= 0) { try { - this.reservedSpaceSize = TSizeInBytes.make(ssTO.getReservedSize(), SizeUnit.BYTES); + this.reservedSpaceSize = TSizeInBytes.make(ssTO.getReservedSize()); log.trace("StorageSpaceData - TotalSize (reserved): {}", this.reservedSpaceSize); } catch (InvalidTSizeAttributesException ex2) { log.error("Error while constructing SpaceReserved", ex2); @@ -528,7 +528,7 @@ public final TSizeInBytes getFreeSpaceSize() { long size = this.totalSpaceSize.value() - this.usedSpaceSize.value(); if (size >= 0) { try { - this.freeSpaceSize = TSizeInBytes.make(size, SizeUnit.BYTES); + this.freeSpaceSize = TSizeInBytes.make(size); } catch (InvalidTSizeAttributesException e) { log.warn("Unable to create a valid Free Size, used empty one"); this.freeSpaceSize = TSizeInBytes.makeEmpty(); @@ -588,7 +588,7 @@ private final void updateFreeSize() { long freeSizeValue = this.totalSpaceSize.value() - 
this.getUsedSpaceSize().value(); if ((freeSizeValue < this.totalSpaceSize.value()) && (freeSizeValue >= 0)) { try { - this.freeSpaceSize = TSizeInBytes.make(freeSizeValue, SizeUnit.BYTES); + this.freeSpaceSize = TSizeInBytes.make(freeSizeValue); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); } @@ -603,7 +603,7 @@ private final void updateBusySize() { long busySize = usedSizeValue + reservedSizeValue + unavailableSizeValue; if ((busySize < this.totalSpaceSize.value()) && (busySize >= 0)) { try { - this.busySpaceSize = TSizeInBytes.make(busySize, SizeUnit.BYTES); + this.busySpaceSize = TSizeInBytes.make(busySize); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); } @@ -616,7 +616,7 @@ private final void updateAvailableSize() { long availableSizeValue = getTotalSpaceSize().value() - busySizeValue; if ((availableSizeValue < this.totalSpaceSize.value()) && (availableSizeValue >= 0)) { try { - this.availableSpaceSize = TSizeInBytes.make(availableSizeValue, SizeUnit.BYTES); + this.availableSpaceSize = TSizeInBytes.make(availableSizeValue); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); } @@ -655,7 +655,7 @@ public final TSizeInBytes getBusySpaceSize() { } else { try { this.busySpaceSize = TSizeInBytes.make(this.usedSpaceSize.value() - + this.unavailableSpaceSize.value() + this.reservedSpaceSize.value(), SizeUnit.BYTES); + + this.unavailableSpaceSize.value() + this.reservedSpaceSize.value()); } catch (InvalidTSizeAttributesException e) { log.warn("Unable to create a valid Busy Size, used empty one"); this.busySpaceSize = TSizeInBytes.makeEmpty(); @@ -676,8 +676,8 @@ public final TSizeInBytes getAvailableSpaceSize() { this.availableSpaceSize = TSizeInBytes.makeEmpty(); } else { try { - this.availableSpaceSize = TSizeInBytes - .make(this.totalSpaceSize.value() - this.getBusySpaceSize().value(), SizeUnit.BYTES); + this.availableSpaceSize = + TSizeInBytes.make(this.totalSpaceSize.value() - 
this.getBusySpaceSize().value()); } catch (InvalidTSizeAttributesException e) { log.warn("Unable to produce the TSizeInBytes object from '{}' and '{}'", (this.totalSpaceSize.value() - this.getBusySpaceSize().value()), SizeUnit.BYTES); diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java index 8c7adaa5b..1f4798087 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaInfo.java @@ -17,117 +17,114 @@ */ public class GPFSQuotaInfo implements GPFSFilesetQuotaInfo { - public static GPFSQuotaInfo fromNativeQuotaInfo(VirtualFS fs, quota_info qi) { + public static GPFSQuotaInfo fromNativeQuotaInfo(VirtualFS fs, quota_info qi) { - return new GPFSQuotaInfo(fs, qi); - } + return new GPFSQuotaInfo(fs, qi); + } - private long blockHardLimit; - private long blockSoftLimit; - private long blockUsage; + private long blockHardLimit; + private long blockSoftLimit; + private long blockUsage; - private String filesetName; - private boolean quotaEnabled = false; - private VirtualFS VFS; + private String filesetName; + private boolean quotaEnabled = false; + private VirtualFS VFS; - private GPFSQuotaInfo(VirtualFS fs, quota_info qi) { + private GPFSQuotaInfo(VirtualFS fs, quota_info qi) { - this.VFS = fs; - this.filesetName = qi.getFileset_name(); - this.blockUsage = qi.getBlock_usage(); - this.blockHardLimit = qi.getBlock_hard_limit(); - this.blockSoftLimit = qi.getBlock_soft_limit(); - this.quotaEnabled = true; - } + this.VFS = fs; + this.filesetName = qi.getFileset_name(); + this.blockUsage = qi.getBlock_usage(); + this.blockHardLimit = qi.getBlock_hard_limit(); + this.blockSoftLimit = qi.getBlock_soft_limit(); + this.quotaEnabled = true; + } - public long getBlockHardLimit() { + public long getBlockHardLimit() { - return blockHardLimit; - } + return blockHardLimit; + } - public long getBlockSoftLimit() { + public long 
getBlockSoftLimit() { - return blockSoftLimit; - } + return blockSoftLimit; + } - public long getBlockUsage() { + public long getBlockUsage() { - return blockUsage; - } + return blockUsage; + } - public String getFilesetName() { + public String getFilesetName() { - return filesetName; - } + return filesetName; + } - @Override - public SizeUnit getSizeUnit() { + @Override + public SizeUnit getSizeUnit() { - return SizeUnit.BYTES; - } + return SizeUnit.BYTES; + } - public VirtualFS getVFS() { + public VirtualFS getVFS() { - return VFS; - } + return VFS; + } - @Override - public boolean isQuotaEnabled() { + @Override + public boolean isQuotaEnabled() { - return quotaEnabled; - } + return quotaEnabled; + } - public void setBlockHardLimit(long blockHardLimit) { + public void setBlockHardLimit(long blockHardLimit) { - this.blockHardLimit = blockHardLimit; - } + this.blockHardLimit = blockHardLimit; + } - public void setBlockSoftLimit(long blockSoftLimit) { + public void setBlockSoftLimit(long blockSoftLimit) { - this.blockSoftLimit = blockSoftLimit; - } + this.blockSoftLimit = blockSoftLimit; + } - public void setBlockUsage(long blockUsage) { + public void setBlockUsage(long blockUsage) { - this.blockUsage = blockUsage; - } + this.blockUsage = blockUsage; + } - public void setFilesetName(String filesetName) { + public void setFilesetName(String filesetName) { - this.filesetName = filesetName; - } + this.filesetName = filesetName; + } - public void setVFS(VirtualFS vFS) { + public void setVFS(VirtualFS vFS) { - VFS = vFS; - } + VFS = vFS; + } - @Override - public String toString() { - return "GPFSQuotaInfo [filesetName=" + filesetName + ", blockUsage=" - + getBlockUsageAsTSize() + ", blockHardLimit=" + getBlockHardLimitAsTSize() + ", blockSoftLimit=" - + getBlockSoftLimitAsTSize() + ", quotaEnabled=" + quotaEnabled + "]"; - } + @Override + public String toString() { + return "GPFSQuotaInfo [filesetName=" + filesetName + ", blockUsage=" + getBlockUsageAsTSize() + + ", 
blockHardLimit=" + getBlockHardLimitAsTSize() + ", blockSoftLimit=" + + getBlockSoftLimitAsTSize() + ", quotaEnabled=" + quotaEnabled + "]"; + } - @Override - public TSizeInBytes getBlockUsageAsTSize() { + @Override + public TSizeInBytes getBlockUsageAsTSize() { - return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockUsage()), - getSizeUnit()); - } + return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockUsage())); + } - @Override - public TSizeInBytes getBlockHardLimitAsTSize() { + @Override + public TSizeInBytes getBlockHardLimitAsTSize() { - return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockHardLimit()), - getSizeUnit()); - } + return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockHardLimit())); + } - @Override - public TSizeInBytes getBlockSoftLimitAsTSize() { + @Override + public TSizeInBytes getBlockSoftLimitAsTSize() { - return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockSoftLimit()), - getSizeUnit()); - } + return TSizeInBytes.make(GPFSSizeHelper.getBytesFromKIB(getBlockSoftLimit())); + } } diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java index 82310d1b2..cca2daeed 100644 --- a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java @@ -4,18 +4,6 @@ */ package it.grid.storm.space.gpfsquota; -import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.concurrency.NamedThreadFactory; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemError; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.model.VirtualFS; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.TSizeInBytes; -import 
it.grid.storm.util.VirtualFSHelper; - import java.util.List; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; @@ -28,6 +16,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.catalogs.ReservedSpaceCatalog; +import it.grid.storm.concurrency.NamedThreadFactory; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.filesystem.FilesystemError; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.model.VirtualFS; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.space.StorageSpaceData; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.util.VirtualFSHelper; + /** * GPFSQuotaManager. Currently supports only GPFS fileset quotas. This manager starts periodic tasks * that fetch quota information from gpfs fs and update the space area data on the Storm database. @@ -103,10 +102,9 @@ private void configureExecutionService() { quotaWorkersExecutionService = Executors.newFixedThreadPool(quotaEnabledFilesystems.size(), new NamedThreadFactory("GPFSQuotaWorker")); - quotaService = - new ExecutorCompletionService<>(quotaWorkersExecutionService); + quotaService = new ExecutorCompletionService<>(quotaWorkersExecutionService); - long refreshPeriod = Configuration.getInstance().getGPFSQuotaRefreshPeriod(); + long refreshPeriod = StormConfiguration.getInstance().getGPFSQuotaRefreshPeriod(); log.info("GPFSQuotaManager refresh period (in seconds): {}", refreshPeriod); @@ -197,7 +195,7 @@ private void handleNoLimitsQuota(GPFSFilesetQuotaInfo info, StorageSpaceData ssd try { long freeSizeFromFS = info.getVFS().getFSDriverInstance().get_free_space(); - TSizeInBytes freeSizeInBytes = TSizeInBytes.make(freeSizeFromFS, SizeUnit.BYTES); + TSizeInBytes freeSizeInBytes = TSizeInBytes.make(freeSizeFromFS); ssd.setTotalGuaranteedSize(freeSizeInBytes); ssd.setTotalSpaceSize(freeSizeInBytes); @@ -221,15 +219,13 @@ private 
void handleNoLimitsQuota(GPFSFilesetQuotaInfo info, StorageSpaceData ssd private StorageSpaceData getStorageSpaceDataForVFS(VirtualFS vfs) { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); String spaceToken = vfs.getSpaceTokenDescription(); - return rsc.getStorageSpaceByAlias(spaceToken); + return ReservedSpaceCatalog.getInstance().getStorageSpaceByAlias(spaceToken); } private void persistStorageSpaceData(StorageSpaceData ssd) throws DataAccessException { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); - rsc.updateStorageSpace(ssd); + ReservedSpaceCatalog.getInstance().updateStorageSpace(ssd); } private synchronized void setLastFailure(Throwable t) { diff --git a/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java b/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java index fa940caf0..da5697f69 100644 --- a/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java +++ b/src/main/java/it/grid/storm/srm/types/ArrayOfTSizeInBytes.java @@ -12,75 +12,74 @@ */ package it.grid.storm.srm.types; -import it.grid.storm.common.types.SizeUnit; - import java.io.Serializable; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; +import com.google.common.collect.Lists; + public class ArrayOfTSizeInBytes implements Serializable { - private static final long serialVersionUID = -1987674620390240434L; + private static final long serialVersionUID = -1987674620390240434L; - public static final String PNAME_arrayOfExpectedFileSizes = "arrayOfExpectedFileSizes"; + public static final String PNAME_arrayOfExpectedFileSizes = "arrayOfExpectedFileSizes"; - private ArrayList sizeInBytesList; + private List sizeInBytesList; - public ArrayOfTSizeInBytes() { + public ArrayOfTSizeInBytes() { - sizeInBytesList = new ArrayList(); - } + sizeInBytesList = Lists.newArrayList(); + } - public static ArrayOfTSizeInBytes decode(Map inputParam, String fieldName) { + public static ArrayOfTSizeInBytes decode(Map 
inputParam, String fieldName) { - List inputList = null; - try { - inputList = Arrays.asList((Object[]) inputParam.get(fieldName)); - } catch (NullPointerException e) { - // log.warn("Empty SURL array found!"); - } + List inputList = null; + try { + inputList = Arrays.asList((Object[]) inputParam.get(fieldName)); + } catch (NullPointerException e) { + // log.warn("Empty SURL array found!"); + } - if (inputList == null) - return null; + if (inputList == null) + return null; - ArrayOfTSizeInBytes list = new ArrayOfTSizeInBytes(); - for (int i = 0; i < inputList.size(); i++) { - TSizeInBytes size = null; - String strLong = (String) inputList.get(i); - try { - size = TSizeInBytes.make(Long.parseLong(strLong), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - return null; - } - list.addTSizeInBytes(size); - } - return list; - } + ArrayOfTSizeInBytes list = new ArrayOfTSizeInBytes(); + for (int i = 0; i < inputList.size(); i++) { + TSizeInBytes size = null; + String strLong = (String) inputList.get(i); + try { + size = TSizeInBytes.make(Long.parseLong(strLong)); + } catch (InvalidTSizeAttributesException e) { + return null; + } + list.addTSizeInBytes(size); + } + return list; + } - public Object[] getArray() { + public Object[] getArray() { - return sizeInBytesList.toArray(); - } + return sizeInBytesList.toArray(); + } - public TSizeInBytes getTSizeInBytes(int i) { + public TSizeInBytes getTSizeInBytes(int i) { - return (TSizeInBytes) sizeInBytesList.get(i); - } + return sizeInBytesList.get(i); + } - public void setTSizeInBytes(int index, TSizeInBytes size) { + public void setTSizeInBytes(int index, TSizeInBytes size) { - sizeInBytesList.set(index, size); - } + sizeInBytesList.set(index, size); + } - public void addTSizeInBytes(TSizeInBytes size) { + public void addTSizeInBytes(TSizeInBytes size) { - sizeInBytesList.add(size); - } + sizeInBytesList.add(size); + } - public int size() { + public int size() { - return sizeInBytesList.size(); - } + return 
sizeInBytesList.size(); + } } diff --git a/src/main/java/it/grid/storm/srm/types/InvalidTSizeAttributesException.java b/src/main/java/it/grid/storm/srm/types/InvalidTSizeAttributesException.java index 8b77c083a..364529f40 100644 --- a/src/main/java/it/grid/storm/srm/types/InvalidTSizeAttributesException.java +++ b/src/main/java/it/grid/storm/srm/types/InvalidTSizeAttributesException.java @@ -4,40 +4,21 @@ */ package it.grid.storm.srm.types; -/** - * This class represents an Exception thrown when FileSize receives null as - * constructor attributes; or a negative size. - * - * @author Ezio Corso - * @author EGRID - ICTP Trieste - * @date March 23rd, 2005 - * @version 1.0 - */ -import it.grid.storm.common.types.SizeUnit; - public class InvalidTSizeAttributesException extends RuntimeException { - /** - * - */ - private static final long serialVersionUID = 1L; - - private boolean negativeSize; - private boolean nullUnit; - - /** - * Constructor that requires the long and the SizeUnit that caused the - * exception to be thrown. 
- */ - public InvalidTSizeAttributesException(long size, SizeUnit unit) { + /** + * + */ + private static final long serialVersionUID = 1L; + + private long size; - nullUnit = unit == null; - negativeSize = size < 0; - } + public InvalidTSizeAttributesException(long size) { + this.size = size; + } - public String toString() { + public String toString() { - return "Ivalid TFileSize Attributes: nullSizeUnit=" + nullUnit - + "; negativeSize=" + negativeSize; - } + return "Invalid TFileSize Attributes: " + size; + } } diff --git a/src/main/java/it/grid/storm/srm/types/TRequestToken.java b/src/main/java/it/grid/storm/srm/types/TRequestToken.java index 0f7454edf..ae9194fb4 100644 --- a/src/main/java/it/grid/storm/srm/types/TRequestToken.java +++ b/src/main/java/it/grid/storm/srm/types/TRequestToken.java @@ -12,7 +12,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; /** * This class represents a Request Token @@ -31,7 +31,7 @@ public class TRequestToken implements Serializable { private final Calendar expiration; - private static final long REQUEST_LIFETIME = Configuration.getInstance() + private static final long REQUEST_LIFETIME = StormConfiguration.getInstance() .getExpiredRequestTime() * 1000; public TRequestToken(String requestToken, Date timestamp) diff --git a/src/main/java/it/grid/storm/srm/types/TSURL.java b/src/main/java/it/grid/storm/srm/types/TSURL.java index 58420a373..a840fd938 100644 --- a/src/main/java/it/grid/storm/srm/types/TSURL.java +++ b/src/main/java/it/grid/storm/srm/types/TSURL.java @@ -15,7 +15,7 @@ import it.grid.storm.common.types.SFN; import it.grid.storm.common.types.SiteProtocol; import it.grid.storm.common.types.StFN; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.naming.SURL; @@ -66,7 +66,7 @@ public class TSURL { if 
(tsurlManaged.isEmpty()) { TSURL checkTSURL; - String[] surlValid = Configuration.getInstance().getManagedSURLs(); + String[] surlValid = StormConfiguration.getInstance().getManagedSURLs(); for (String checkSurl : surlValid) { try { @@ -83,7 +83,7 @@ public class TSURL { if (defaultPorts.isEmpty()) { - Integer[] ports = Configuration.getInstance() + Integer[] ports = StormConfiguration.getInstance() .getManagedSurlDefaultPorts(); for (Integer portInteger : ports) { diff --git a/src/main/java/it/grid/storm/srm/types/TSizeInBytes.java b/src/main/java/it/grid/storm/srm/types/TSizeInBytes.java index 70f5bab9f..97db4208a 100644 --- a/src/main/java/it/grid/storm/srm/types/TSizeInBytes.java +++ b/src/main/java/it/grid/storm/srm/types/TSizeInBytes.java @@ -17,174 +17,171 @@ import java.io.Serializable; import java.util.Map; +import com.fasterxml.jackson.annotation.JsonProperty; public class TSizeInBytes implements Serializable { - public static String PNAME_SIZE = "size"; - public static String PNAME_DESIREDSIZEOFTOTALSPACE = "desiredSizeOfTotalSpace"; - public static String PNAME_DESIREDSIZEOFGUARANTEEDSPACE = "desiredSizeOfGuaranteedSpace"; - public static String PNAME_SIZEOFTOTALRESERVEDSPACE = "sizeOfTotalReservedSpace"; - public static String PNAME_SIZEOFGUARANTEEDRESERVEDSPACE = "sizeOfGuaranteedReservedSpace"; - public static String PNAME_TOTALSIZE = "totalSize"; - public static String PNAME_GUARANTEEDSIZE = "guaranteedSize"; - public static String PNAME_UNUSEDSIZE = "unusedSize"; - - private long size = -1; - private SizeUnit unit = SizeUnit.EMPTY; - private boolean empty = true; - static private TSizeInBytes emptySize = null; // only instance of empty - // TSizeInBytes! - - /** - * Constructor requiring the size as a long, and the unit of measure SizeUnit. 
- */ - private TSizeInBytes(long size, SizeUnit unit, boolean empty) { - - this.size = size; - this.unit = unit; - this.empty = empty; - } - - /** - * Factory method that returns a TSizeInBytes object; an - * InvalidTSizeAttributesException is thrown if a null SizeUnit is passed, or - * if a negative long is passed as size. - */ - public static TSizeInBytes make(long size, SizeUnit unit) - throws InvalidTSizeAttributesException { - - if ((unit == null) || (size < 0)) - throw new InvalidTSizeAttributesException(size, unit); - return new TSizeInBytes(size, unit, false); - } - - /** - * Method that returns an empty TSizeInBytes object. - */ - public static TSizeInBytes makeEmpty() { - - if (emptySize != null) - return emptySize; - emptySize = new TSizeInBytes(-1, SizeUnit.EMPTY, true); - return emptySize; - } - - /** - * Method that returns a TSizeInBytes object retrieving its value by the - * Hashtable used for comunicating with the FE - */ - public static TSizeInBytes decode(Map inputParam, String fieldName) { - - String size = (String) inputParam.get(fieldName); - - if (size == null) - return TSizeInBytes.makeEmpty(); - long sizeLong = Long.parseLong(size); - - TSizeInBytes decodedSize = null; - try { - decodedSize = TSizeInBytes.make(sizeLong, SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - return TSizeInBytes.makeEmpty(); - } - - return decodedSize; - } - - /** - * Method that converts this FileSize to the specified SizeUnit; beware that - * converting back will _not_ satisfy equality because of rounding in - * calculation. In case the wanted unit is null, or this TSizeInBytes is - * empty, -1 is returned. 
- */ - public double getSizeIn(SizeUnit unit) { - - if ((unit != null) && (!empty)) { - Long l_size = Long.valueOf(size); - double result = l_size.doubleValue() - * (this.unit.conversionFactor() / unit.conversionFactor()); - return result; - } else - return -1; - } - - /** - * Method that returns a long that represents the value with which this - * TSizeInBytes was created. In case this is empty, -1 is returned. - */ - public long value() { - - if (empty) - return -1; - return size; - } - - /** - * Method that returns the SizeUnit with which this TSizeInBytes was created. - * In case this is empty, SizeUnit.EMPTY is returned. - */ - public SizeUnit unit() { - - if (empty) - return SizeUnit.EMPTY; - return unit; - } - - /** - * Method that returns whether this is an empty TSizeInBytes. - */ - public boolean isEmpty() { - - return empty; - } - - /** - * Method uses to encode value for FE communication. - */ - public void encode(Map param, String fieldName) { - - if (empty) - return; - - long size_out; - Long sizeInBytes = Long.valueOf(this.value()); - if (sizeInBytes != null) - size_out = sizeInBytes.longValue(); - else - size_out = -1; - - param.put(fieldName, String.valueOf(size_out)); - } - - public String toString() { - - if (empty) - return "Empty"; - return size + " " + unit; - } - - /** - * Beware that this equality will _not_ return true for the same quantity - * expressed in different units of measure! 
- */ - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof TSizeInBytes)) - return false; - TSizeInBytes fs = (TSizeInBytes) o; - if ((empty) && (fs.empty)) - return true; - return ((!empty) && (!fs.empty) && (this.size == fs.size) && (this.unit == fs.unit)); - } - - public int hashCode() { - - if (empty) - return 0; - int hash = 17; - hash = 37 * hash + (Long.valueOf(size)).hashCode(); - hash = 37 * hash + unit.hashCode(); - return hash; - } -} + /** + * + */ + private static final long serialVersionUID = 1L; + + public static String PNAME_SIZE = "size"; + public static String PNAME_DESIREDSIZEOFTOTALSPACE = "desiredSizeOfTotalSpace"; + public static String PNAME_DESIREDSIZEOFGUARANTEEDSPACE = "desiredSizeOfGuaranteedSpace"; + public static String PNAME_SIZEOFTOTALRESERVEDSPACE = "sizeOfTotalReservedSpace"; + public static String PNAME_SIZEOFGUARANTEEDRESERVEDSPACE = "sizeOfGuaranteedReservedSpace"; + public static String PNAME_TOTALSIZE = "totalSize"; + public static String PNAME_GUARANTEEDSIZE = "guaranteedSize"; + public static String PNAME_UNUSEDSIZE = "unusedSize"; + + public final static long EMPTY_SIZE = -1; + + private long size = EMPTY_SIZE; + private SizeUnit unit; + + private TSizeInBytes(long size, SizeUnit unit) { + + this.size = size; + this.unit = unit; + } + + /** + * Factory method that returns a TSizeInBytes object; an InvalidTSizeAttributesException is thrown + * if a null SizeUnit is passed, or if a negative long is passed as size. + */ + public static TSizeInBytes make(long size) throws InvalidTSizeAttributesException { + + if (size < 0) { + throw new InvalidTSizeAttributesException(size); + } + return new TSizeInBytes(size, SizeUnit.BYTES); + } + + /** + * Method that returns an empty TSizeInBytes object. 
+ */ + public static TSizeInBytes makeEmpty() { + + return new TSizeInBytes(EMPTY_SIZE, SizeUnit.EMPTY); + } + + /** + * Method that returns a TSizeInBytes object retrieving its value by the HashTable used for + * communicating with the FE + */ + public static TSizeInBytes decode(Map inputParam, String fieldName) { + + String size = (String) inputParam.get(fieldName); + + if (size == null) + return TSizeInBytes.makeEmpty(); + long sizeLong = Long.parseLong(size); + + TSizeInBytes decodedSize = null; + try { + decodedSize = TSizeInBytes.make(sizeLong); + } catch (InvalidTSizeAttributesException e) { + return TSizeInBytes.makeEmpty(); + } + + return decodedSize; + } + + /** + * Method that converts this FileSize to the specified SizeUnit; beware that converting back will + * _not_ satisfy equality because of rounding in calculation. In case the wanted unit is null, or + * this TSizeInBytes is empty, -1 is returned. + */ + public double getSizeIn(SizeUnit unit) { + + if (!isEmpty()) { + Long l_size = Long.valueOf(size); + double result = + l_size.doubleValue() * (this.unit.conversionFactor() / unit.conversionFactor()); + return result; + } + return -1; + } + + /** + * Method that returns a long that represents the value with which this TSizeInBytes was created. + * In case this is empty, -1 is returned. + */ + @JsonProperty("value") + public long value() { + + return !isEmpty() ? size : -1; + } + + /** + * Method that returns the SizeUnit with which this TSizeInBytes was created. In case this is + * empty, SizeUnit.EMPTY is returned. + */ + @JsonProperty("unit") + public SizeUnit unit() { + + return unit; + } + + /** + * Method that returns whether this is an empty TSizeInBytes. + */ + public boolean isEmpty() { + + return size == EMPTY_SIZE; + } + + /** + * Method uses to encode value for FE communication. 
+ */ + public void encode(Map param, String fieldName) { + + if (isEmpty()) { + return; + } + + long size_out; + Long sizeInBytes = Long.valueOf(this.value()); + if (sizeInBytes != null) + size_out = sizeInBytes.longValue(); + else + size_out = -1; + + param.put(fieldName, String.valueOf(size_out)); + } + + public String toString() { + + if (isEmpty()) { + return "Empty"; + } + + return size + " " + unit; + } + + /** + * Beware that this equality will _not_ return true for the same quantity expressed in different + * units of measure! + */ + public boolean equals(Object o) { + + if (o == this) + return true; + if (!(o instanceof TSizeInBytes)) + return false; + TSizeInBytes fs = (TSizeInBytes) o; + if (isEmpty() && (fs.isEmpty())) + return true; + return ((!isEmpty()) && (!fs.isEmpty()) && (this.size == fs.size) && (this.unit == fs.unit)); + } + + public int hashCode() { + + if (isEmpty()) + return 0; + int hash = 17; + hash = 37 * hash + (Long.valueOf(size)).hashCode(); + hash = 37 * hash + unit.hashCode(); + return hash; + } +} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java b/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java index b9bbd24f9..513c3070f 100644 --- a/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java +++ b/src/main/java/it/grid/storm/synchcall/FileSystemUtility.java @@ -8,58 +8,56 @@ import static it.grid.storm.metrics.StormMetricRegistry.METRIC_REGISTRY; import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import it.grid.storm.filesystem.Filesystem; import it.grid.storm.filesystem.FilesystemIF; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.filesystem.MetricsFilesystemAdapter; import it.grid.storm.filesystem.swig.genericfs; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.model.VirtualFS; public class FileSystemUtility { - 
private static Logger log = NamespaceDirector.getLogger(); + private static final Logger log = LoggerFactory.getLogger(FileSystemUtility.class); - public static LocalFile getLocalFileByAbsolutePath(String absolutePath) - throws NamespaceException { + public static LocalFile getLocalFileByAbsolutePath(String absolutePath) + throws NamespaceException { - LocalFile file = null; - VirtualFS vfs = null; - genericfs fsDriver = null; - FilesystemIF fs = null; - try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyAbsolutePath( - absolutePath); - } catch (NamespaceException ex) { - log.error("Unable to retrieve VFS by Absolute Path", ex); - } - if (vfs == null) { - throw new NamespaceException("No VFS found in StoRM for this file :'" - + absolutePath + "'"); - } + LocalFile file = null; + VirtualFS vfs = null; + genericfs fsDriver = null; + FilesystemIF fs = null; + try { + vfs = Namespace.getInstance().resolveVFSbyAbsolutePath(absolutePath); + } catch (NamespaceException ex) { + log.error("Unable to retrieve VFS by Absolute Path", ex); + } + if (vfs == null) { + throw new NamespaceException("No VFS found in StoRM for this file :'" + absolutePath + "'"); + } - try { - fsDriver = (genericfs) (vfs.getFSDriver()).newInstance(); - - FilesystemIF wrappedFs = new Filesystem(fsDriver); - - wrappedFs = maybeWrapFilesystem(wrappedFs); - - fs = new MetricsFilesystemAdapter(wrappedFs, - METRIC_REGISTRY.getRegistry()); - - file = new LocalFile(absolutePath, fs); - } catch (NamespaceException ex1) { - log.error("Error while retrieving FS driver", ex1); - } catch (IllegalAccessException ex1) { - log.error("Error while using reflection in FS Driver", ex1); - } catch (InstantiationException ex1) { - log.error("Error while instancing new FS driver", ex1); - } + try { + fsDriver = (genericfs) (vfs.getFSDriver()).newInstance(); - return file; - } + FilesystemIF wrappedFs = new Filesystem(fsDriver); + + wrappedFs = maybeWrapFilesystem(wrappedFs); + + fs = new 
MetricsFilesystemAdapter(wrappedFs, METRIC_REGISTRY.getRegistry()); + + file = new LocalFile(absolutePath, fs); + } catch (NamespaceException ex1) { + log.error("Error while retrieving FS driver", ex1); + } catch (IllegalAccessException ex1) { + log.error("Error while using reflection in FS Driver", ex1); + } catch (InstantiationException ex1) { + log.error("Error while instancing new FS driver", ex1); + } + + return file; + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/AbstractCommand.java b/src/main/java/it/grid/storm/synchcall/command/AbstractCommand.java index 892d3df25..8b2bc5566 100644 --- a/src/main/java/it/grid/storm/synchcall/command/AbstractCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/AbstractCommand.java @@ -4,7 +4,7 @@ */ package it.grid.storm.synchcall.command; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.synchcall.data.IdentityInputData; import it.grid.storm.synchcall.data.InputData; @@ -12,7 +12,7 @@ public abstract class AbstractCommand implements Command { - protected static Configuration config = Configuration.getInstance(); + protected static StormConfiguration config = StormConfiguration.getInstance(); public static GridUserInterface getUserFromInputData(InputData id){ diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java index 99f9c11f8..3a26c6580 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ExtendFileLifeTimeCommand.java @@ -11,9 +11,8 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; 
+import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -55,567 +54,518 @@ * @author Alberto Forti * @date = Oct 10, 2008 */ -public class ExtendFileLifeTimeCommand extends DataTransferCommand implements - Command { - - private static final Logger log = LoggerFactory - .getLogger(ExtendFileLifeTimeCommand.class); - private static final String SRM_COMMAND = "srmExtendFileLifeTime"; - - public ExtendFileLifeTimeCommand() { - - }; - - /** - * Executes an srmExtendFileLifeTime(). - * - * @param inputData - * ExtendFileLifeTimeInputData - * @return ExtendFileLifeTimeOutputData - */ - - public OutputData execute(InputData data) { - - final String funcName = "ExtendFileLifeTime: "; - ExtendFileLifeTimeOutputData outputData = new ExtendFileLifeTimeOutputData(); - IdentityExtendFileLifeTimeInputData inputData; - if (data instanceof IdentityInputData) { - inputData = (IdentityExtendFileLifeTimeInputData) data; - } else { - outputData.setReturnStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), - (ExtendFileLifeTimeInputData) data); - return outputData; - } - - TReturnStatus globalStatus = null; - - ExtendFileLifeTimeCommand.log.debug(funcName + "Started."); - - /****************************** Check for malformed request ******************************/ - if (inputData.getArrayOfSURLs() == null) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Missing mandatory parameter 'arrayOfSURLs'"); - } else if (inputData.getArrayOfSURLs().size() < 1) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Parameter 'arrayOfSURLs': invalid size"); - } else 
if (!(inputData.getNewPinLifetime().isEmpty()) - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() != null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Cannot update both FileLifetime and PinLifetime"); - } else if (inputData.getNewPinLifetime().isEmpty() - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() != null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Do not specify the request token to update the FileLifetime"); - } else if (!(inputData.getNewPinLifetime().isEmpty()) - && !(inputData.getNewFileLifetime().isEmpty()) - && (inputData.getRequestToken() == null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Attempt to extend PinLifetime without request token"); - } else if (!(inputData.getNewPinLifetime().isEmpty()) - && inputData.getNewFileLifetime().isEmpty() - && (inputData.getRequestToken() == null)) { - globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "Attempt to extend PinLifetime without request token"); - } - - if (globalStatus != null) { - ExtendFileLifeTimeCommand.log.debug(funcName - + globalStatus.getExplanation()); - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), inputData); - return outputData; - } - - /********************** Check user authentication and authorization ******************************/ - GridUserInterface user = inputData.getUser(); - if (user == null) { - ExtendFileLifeTimeCommand.log.debug(funcName + "The user field is NULL"); - outputData.setReturnStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential!")); - printRequestOutcome(outputData.getReturnStatus(), inputData); - outputData.setArrayOfFileStatuses(null); - return outputData; - } - - /********************************** Start to manage the request 
***********************************/ - ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatus = new ArrayOfTSURLLifetimeReturnStatus(); - - if ((inputData.getRequestToken() == null) - && (inputData.getNewPinLifetime().isEmpty())) { - log.debug(funcName + "Extending SURL lifetime..."); - globalStatus = manageExtendSURLLifetime(inputData.getNewFileLifetime(), - inputData.getArrayOfSURLs(), user, arrayOfFileStatus, - inputData.getRequestToken()); - } else { - log.debug(funcName + "Extending PIN lifetime..."); - try { - globalStatus = manageExtendPinLifetime(inputData.getRequestToken(), - inputData.getNewPinLifetime(), inputData.getArrayOfSURLs(), user, - arrayOfFileStatus); - } catch (IllegalArgumentException e) { - log.error(funcName + "Unexpected IllegalArgumentException: " - + e.getMessage()); - globalStatus = CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, "Request Failed, retry."); - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(null); - printRequestOutcome(outputData.getReturnStatus(), inputData); - return outputData; - } - } - - outputData.setReturnStatus(globalStatus); - outputData.setArrayOfFileStatuses(arrayOfFileStatus); - printRequestOutcome(outputData.getReturnStatus(), inputData); - log.debug(funcName + "Finished."); - - return outputData; - } - - /** - * Extend the lifetime of a SURL. The parameter details is filled by this - * method and contains file level information on the execution of the request. - * - * @param newLifetime - * TLifeTimeInSeconds. - * @param arrayOfSURLS - * ArrayOfSURLs. - * @param guser - * VomsGridUser. - * @param arrayOfFileLifetimeStatus - * . ArrayOfTSURLLifetimeReturnStatus The returned file level - * information. - * @return TReturnStatus. The request status. 
- */ - private TReturnStatus manageExtendSURLLifetime( - TLifeTimeInSeconds newLifetime, ArrayOfSURLs arrayOfSURLS, - GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details, - TRequestToken requestToken) { - - if (details == null) { - ExtendFileLifeTimeCommand.log - .debug("Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); - } - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); - boolean requestSuccess = true; - boolean requestFailure = true; - - // For each requested SURL, try to extend its lifetime. - for (int i = 0; i < arrayOfSURLS.size(); i++) { - TSURL surl = arrayOfSURLS.getTSURL(i); - StoRI stori = null; - TStatusCode fileStatusCode; - String fileStatusExplanation; - try { - try { - stori = namespace.resolveStoRIbySURL(surl, guser); - } catch (IllegalArgumentException e) { - ExtendFileLifeTimeCommand.log.error( - "Unable to build StoRI by SURL and user", e); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Unable to build StoRI by SURL and user"; - } catch (UnapprochableSurlException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " UnapprochableSurlException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_AUTHORIZATION_FAILURE; - fileStatusExplanation = e.getMessage(); - } catch (NamespaceException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " NamespaceException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = e.getMessage(); - } catch (InvalidSURLException e) { - log.info("Unable to build a stori for surl " + surl + " for user " - + guser + " InvalidSURLException: " + e.getMessage()); - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = e.getMessage(); - } - if (stori != null) { - LocalFile localFile = stori.getLocalFile(); - 
if (localFile.exists()) { - ExtendFileLifeTimeCommand.log.debug(stori.getPFN().toString()); - List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); - if (volatileInfo.isEmpty()) { - fileStatusCode = TStatusCode.SRM_SUCCESS; - fileStatusExplanation = "Nothing to do, SURL is permanent"; - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestFailure = false; - } else if (volatileInfo.size() > 2) { - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Found more than one entry.... that's a BUG."; - // For lifetimes infinite means also unknown - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else if (isStoRISURLBusy(stori)) { - fileStatusCode = TStatusCode.SRM_FILE_BUSY; - fileStatusExplanation = "File status is SRM_SPACE_AVAILABLE. SURL lifetime cannot be extend (try with PIN lifetime)"; - // For lifetimes infinite means also unknown - newLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else { // Ok, extend the lifetime of the SURL - // Update the DB with the new lifetime - catalog.trackVolatile(stori.getPFN(), - (Calendar) volatileInfo.get(0), newLifetime); - // TODO: return the correct lifetime, i.e. the one which is - // written to the DB. - // TLifeTimeInSeconds writtenLifetime = (TLifeTimeInSeconds) - // volatileInfo.get(1); - - fileStatusCode = TStatusCode.SRM_SUCCESS; - fileStatusExplanation = "Lifetime extended"; - requestFailure = false; - } - } else { // Requested SURL does not exists in the filesystem - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "File does not exist"; - requestSuccess = false; - } - - // Set the file level information to be returned. 
- TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, - fileStatusExplanation); - if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { - ExtendFileLifeTimeCommand.log.info("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [lifetime:" + newLifetime - + " ] successfully done with: [status:" + fileStatus + "]"); - } else { - ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [lifetime:" + newLifetime - + "] failed with: [status:" + fileStatus + "]"); - } - TSURLLifetimeReturnStatus lifetimeReturnStatus = new TSURLLifetimeReturnStatus( - surl, fileStatus, newLifetime, null); - details.addTSurlReturnStatus(lifetimeReturnStatus); - } - } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { - ExtendFileLifeTimeCommand.log - .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); - } - } - TReturnStatus globalStatus = null; - // Set global status - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "All file requests are failed"); - } else if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "All file requests are successfully completed"); - } else { - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Details are on the file statuses"); - } - return globalStatus; - } - - /** - * Returns true if the status of the SURL of the received StoRI is - * SRM_SPACE_AVAILABLE, false otherwise. This method queries the DB, therefore - * pay attention to possible performance issues. - * - * @return boolean - */ - private boolean isStoRISURLBusy(StoRI element) { - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - return checker.isSURLBusy(element.getSURL()); - } - - /** - * Extend the PIN lifetime of a SURL. 
The parameter details is filled by this - * method and contains file level information on the execution of the request. - * - * @param requestToken - * TRequestToken. - * @param newPINLifetime - * TLifeTimeInSeconds. - * @param arrayOfSURLS - * ArrayOfSURLs. - * @param guser - * VomsGridUser. - * @param details - * ArrayOfTSURLLifetimeReturnStatus. - * @return TReturnStatus. The request status. - * @throws UnknownTokenException - * @throws IllegalArgumentException - */ - private TReturnStatus manageExtendPinLifetime(TRequestToken requestToken, - TLifeTimeInSeconds newPINLifetime, ArrayOfSURLs arrayOfSURLS, - GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details) - throws IllegalArgumentException { - - if (details == null) { - ExtendFileLifeTimeCommand.log - .debug("Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); - } - TReturnStatus globalStatus = null; - List requestSURLsList; - try { - requestSURLsList = getListOfSURLsInTheRequest(guser, requestToken); - } catch (UnknownTokenException e4) { - return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, - "Invalid request token"); - } catch (ExpiredTokenException e) { - return CommandHelper.buildStatus(TStatusCode.SRM_REQUEST_TIMED_OUT, - "Request expired"); - } catch (AuthzException e) { - return CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - e.getMessage()); - } - if (requestSURLsList.isEmpty()) { - return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, - "Invalid request token"); - } - // Once we have the list of SURLs belonging to the request, we must check - // that the SURLs given by the user are consistent, that the resulting - // lifetime could be lower than the one requested (and for this we must read - // the Volatile table of the DB), that the SURLs are not released, aborted, - // expired or suspended and so on... therefore the purpose of all that stuff - // is to return the right information. 
I mean, no PIN lifetime is - // effectively extend, in StoRM the TURL corresponds to the SURL. - boolean requestSuccess = true; - boolean requestFailure = true; - TLifeTimeInSeconds PINLifetime; - TLifeTimeInSeconds dbLifetime = null; - for (int i = 0; i < arrayOfSURLS.size(); i++) { - TSURL surl = arrayOfSURLS.getTSURL(i); - TStatusCode statusOfTheSURL = null; - TStatusCode fileStatusCode; - String fileStatusExplanation; - boolean surlFound = false; - // Check if the current SURL belongs to the request token - for (int j = 0; j < requestSURLsList.size(); j++) { - SURLData surlData = (SURLData) requestSURLsList.get(j); - if (surl.equals(surlData.surl)) { - statusOfTheSURL = surlData.statusCode; - requestSURLsList.remove(j); - surlFound = true; - break; - } - } - try { - if (surlFound) { - ExtendFileLifeTimeCommand.log.debug("Found SURL: " - + surl.getSURLString() + " (status: " + statusOfTheSURL.toString() - + ")"); - NamespaceInterface namespace = NamespaceDirector.getNamespace(); - StoRI stori = null; - try { - stori = namespace.resolveStoRIbySURL(surl, guser); - } catch (IllegalArgumentException e) { - log.error("Unable to build StoRI by SURL and user", e); - } catch (Exception e) { - log.info(String.format( - "Unable to build a stori for surl %s for user %s, %s: %s", surl, - guser, e.getClass().getCanonicalName(), e.getMessage())); - } - if (stori != null) { - LocalFile localFile = stori.getLocalFile(); - if (localFile.exists()) { - VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog - .getInstance(); - List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); - - if ((statusOfTheSURL != TStatusCode.SRM_FILE_PINNED) - && (statusOfTheSURL != TStatusCode.SRM_SPACE_AVAILABLE) - && (statusOfTheSURL != TStatusCode.SRM_SUCCESS)) - { - fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; - fileStatusExplanation = "No TURL available"; - PINLifetime = null; - requestSuccess = false; - } else if (volatileInfo.size() > 2) { - fileStatusCode = 
TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Found more than one entry.... that's a BUG."; - // For lifetimes infinite means also unknown - PINLifetime = TLifeTimeInSeconds.makeInfinite(); - requestSuccess = false; - } else { // OK, extend the PIN lifetime. - // If the status is success the extension will not take place, - // only in case of empty parameter the current value are - // returned, otherwaise the request must - // fail! - - if ((statusOfTheSURL == TStatusCode.SRM_SUCCESS) - && (!newPINLifetime.isEmpty())) { - - fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; - fileStatusExplanation = "No TURL available"; - PINLifetime = null; - requestSuccess = false; - - } else { - - fileStatusCode = TStatusCode.SRM_SUCCESS; - - if (volatileInfo.isEmpty()) { // SURL is permanent - dbLifetime = TLifeTimeInSeconds.makeInfinite(); - } else { - dbLifetime = (TLifeTimeInSeconds) volatileInfo.get(1); - } - if ((!dbLifetime.isInfinite()) - && (newPINLifetime.value() > dbLifetime.value())) { - PINLifetime = dbLifetime; - fileStatusExplanation = "The requested PIN lifetime is greater than the lifetime of the SURL." 
- + " PIN lifetime is now equal to the lifetime of the SURL."; - } else { - PINLifetime = newPINLifetime; - fileStatusExplanation = "Lifetime extended"; - } - ExtendFileLifeTimeCommand.log.debug("New PIN lifetime is: " - + PINLifetime.value() + "(SURL: " + surl.getSURLString() - + ")"); - // TODO: update the RequestSummaryCatalog with the new - // pinLifetime - // it is better to do it only once after the for loop - requestFailure = false; - } - } - } else { // file does not exist in the file system - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "Invalid path"; - PINLifetime = null; - requestSuccess = false; - - } - } else { - log.error("Unable to build StoRI by SURL and user"); - fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; - fileStatusExplanation = "Unable to build StoRI by SURL and user"; - // For lifetimes infinite means also unknown - PINLifetime = null; - requestSuccess = false; - } - } else { // SURL not found in the DB - ExtendFileLifeTimeCommand.log.debug("SURL: " + surl.getSURLString() - + " NOT FOUND!"); - fileStatusCode = TStatusCode.SRM_INVALID_PATH; - fileStatusExplanation = "SURL not found in the request"; - PINLifetime = null; - requestSuccess = false; - } - // Set the file level information to be returned. 
- TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, - fileStatusExplanation); - if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { - ExtendFileLifeTimeCommand.log.info("srmExtendFileLifeTime: <" + guser - + "> Request for [token:" + requestToken + "] for [SURL:" + surl - + "] with [pinlifetime: " + newPINLifetime - + "] successfully done with: [status:" + fileStatus.toString() - + "]"); - } else { - ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" - + guser + "> Request for [token:" + requestToken + "] for [SURL:" - + surl + "] with [pinlifetime: " + newPINLifetime - + "] failed with: [status:" + fileStatus.toString() + "]"); - } - - TSURLLifetimeReturnStatus lifetimeReturnStatus = new TSURLLifetimeReturnStatus( - surl, fileStatus, dbLifetime, PINLifetime); - details.addTSurlReturnStatus(lifetimeReturnStatus); - } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { - ExtendFileLifeTimeCommand.log - .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); - } - } - - // Set global status - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "All file requests are failed"); - } else if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "All file requests are successfully completed"); - } else { - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Details are on the file statuses"); - } - return globalStatus; - } - - /** - * Returns the list of SURLs and statuses (a List of SURLData) belonging to - * the request identified by the requestToken. 
- * - * @param requestToken - * TRequestToken - * @return List - * @throws UnknownTokenException - * @throws IllegalArgumentException - * @throws ExpiredTokenException - */ - private List getListOfSURLsInTheRequest(GridUserInterface user, - TRequestToken requestToken) - throws IllegalArgumentException, UnknownTokenException, - ExpiredTokenException { - - List listOfSURLsInfo = new LinkedList(); - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - Map surlStatusMap = - checker.getSURLStatuses(user, requestToken); - - if (!(surlStatusMap == null || surlStatusMap.isEmpty())) { - for (Entry surlStatus : surlStatusMap.entrySet()) { - listOfSURLsInfo.add(new SURLData(surlStatus.getKey(), surlStatus - .getValue().getStatusCode())); - } - } - return listOfSURLsInfo; - } - - private void printRequestOutcome(TReturnStatus status, - ExtendFileLifeTimeInputData inputData) { - - if (inputData != null) { - if (inputData.getArrayOfSURLs() != null) { - if (inputData.getRequestToken() != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getRequestToken(), inputData.getArrayOfSURLs() - .asStringList()); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getArrayOfSURLs().asStringList()); - } - - } else { - if (inputData.getRequestToken() != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, - inputData, inputData.getRequestToken()); - } else { - CommandHelper - .printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - } - - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } - - private class SURLData { - - public TSURL surl; - public TStatusCode statusCode; - - public SURLData(TSURL surl, TStatusCode statusCode) { - - this.surl = surl; - this.statusCode = statusCode; - } - } +public class ExtendFileLifeTimeCommand extends DataTransferCommand implements Command { + + private static final Logger log = 
LoggerFactory.getLogger(ExtendFileLifeTimeCommand.class); + private static final String SRM_COMMAND = "srmExtendFileLifeTime"; + + public ExtendFileLifeTimeCommand() { + + }; + + /** + * Executes an srmExtendFileLifeTime(). + * + * @param inputData ExtendFileLifeTimeInputData + * @return ExtendFileLifeTimeOutputData + */ + + public OutputData execute(InputData data) { + + final String funcName = "ExtendFileLifeTime: "; + ExtendFileLifeTimeOutputData outputData = new ExtendFileLifeTimeOutputData(); + IdentityExtendFileLifeTimeInputData inputData; + if (data instanceof IdentityInputData) { + inputData = (IdentityExtendFileLifeTimeInputData) data; + } else { + outputData.setReturnStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), (ExtendFileLifeTimeInputData) data); + return outputData; + } + + TReturnStatus globalStatus = null; + + ExtendFileLifeTimeCommand.log.debug(funcName + "Started."); + + /****************************** Check for malformed request ******************************/ + if (inputData.getArrayOfSURLs() == null) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Missing mandatory parameter 'arrayOfSURLs'"); + } else if (inputData.getArrayOfSURLs().size() < 1) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Parameter 'arrayOfSURLs': invalid size"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() != null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Cannot update both FileLifetime and PinLifetime"); + } else if (inputData.getNewPinLifetime().isEmpty() + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() != null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Do not specify the 
request token to update the FileLifetime"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && !(inputData.getNewFileLifetime().isEmpty()) && (inputData.getRequestToken() == null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Attempt to extend PinLifetime without request token"); + } else if (!(inputData.getNewPinLifetime().isEmpty()) + && inputData.getNewFileLifetime().isEmpty() && (inputData.getRequestToken() == null)) { + globalStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "Attempt to extend PinLifetime without request token"); + } + + if (globalStatus != null) { + ExtendFileLifeTimeCommand.log.debug(funcName + globalStatus.getExplanation()); + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), inputData); + return outputData; + } + + /********************** + * Check user authentication and authorization + ******************************/ + GridUserInterface user = inputData.getUser(); + if (user == null) { + ExtendFileLifeTimeCommand.log.debug(funcName + "The user field is NULL"); + outputData.setReturnStatus(CommandHelper.buildStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, + "Unable to get user credential!")); + printRequestOutcome(outputData.getReturnStatus(), inputData); + outputData.setArrayOfFileStatuses(null); + return outputData; + } + + /********************************** + * Start to manage the request + ***********************************/ + ArrayOfTSURLLifetimeReturnStatus arrayOfFileStatus = new ArrayOfTSURLLifetimeReturnStatus(); + + if ((inputData.getRequestToken() == null) && (inputData.getNewPinLifetime().isEmpty())) { + log.debug(funcName + "Extending SURL lifetime..."); + globalStatus = manageExtendSURLLifetime(inputData.getNewFileLifetime(), + inputData.getArrayOfSURLs(), user, arrayOfFileStatus, inputData.getRequestToken()); + } else { + log.debug(funcName + "Extending PIN lifetime..."); + try { + 
globalStatus = manageExtendPinLifetime(inputData.getRequestToken(), + inputData.getNewPinLifetime(), inputData.getArrayOfSURLs(), user, arrayOfFileStatus); + } catch (IllegalArgumentException e) { + log.error(funcName + "Unexpected IllegalArgumentException: " + e.getMessage()); + globalStatus = + CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, "Request Failed, retry."); + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(null); + printRequestOutcome(outputData.getReturnStatus(), inputData); + return outputData; + } + } + + outputData.setReturnStatus(globalStatus); + outputData.setArrayOfFileStatuses(arrayOfFileStatus); + printRequestOutcome(outputData.getReturnStatus(), inputData); + log.debug(funcName + "Finished."); + + return outputData; + } + + /** + * Extend the lifetime of a SURL. The parameter details is filled by this method and contains file + * level information on the execution of the request. + * + * @param newLifetime TLifeTimeInSeconds. + * @param arrayOfSURLS ArrayOfSURLs. + * @param guser VomsGridUser. + * @param arrayOfFileLifetimeStatus . ArrayOfTSURLLifetimeReturnStatus The returned file level + * information. + * @return TReturnStatus. The request status. + */ + private TReturnStatus manageExtendSURLLifetime(TLifeTimeInSeconds newLifetime, + ArrayOfSURLs arrayOfSURLS, GridUserInterface guser, ArrayOfTSURLLifetimeReturnStatus details, + TRequestToken requestToken) { + + if (details == null) { + ExtendFileLifeTimeCommand.log.debug( + "Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); + } + Namespace namespace = Namespace.getInstance(); + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + boolean requestSuccess = true; + boolean requestFailure = true; + + // For each requested SURL, try to extend its lifetime. 
+ for (int i = 0; i < arrayOfSURLS.size(); i++) { + TSURL surl = arrayOfSURLS.getTSURL(i); + StoRI stori = null; + TStatusCode fileStatusCode; + String fileStatusExplanation; + try { + try { + stori = namespace.resolveStoRIbySURL(surl, guser); + } catch (IllegalArgumentException e) { + ExtendFileLifeTimeCommand.log.error("Unable to build StoRI by SURL and user", e); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Unable to build StoRI by SURL and user"; + } catch (UnapprochableSurlException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " UnapprochableSurlException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_AUTHORIZATION_FAILURE; + fileStatusExplanation = e.getMessage(); + } catch (NamespaceException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " NamespaceException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = e.getMessage(); + } catch (InvalidSURLException e) { + log.info("Unable to build a stori for surl " + surl + " for user " + guser + + " InvalidSURLException: " + e.getMessage()); + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = e.getMessage(); + } + if (stori != null) { + LocalFile localFile = stori.getLocalFile(); + if (localFile.exists()) { + ExtendFileLifeTimeCommand.log.debug(stori.getPFN().toString()); + List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); + if (volatileInfo.isEmpty()) { + fileStatusCode = TStatusCode.SRM_SUCCESS; + fileStatusExplanation = "Nothing to do, SURL is permanent"; + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestFailure = false; + } else if (volatileInfo.size() > 2) { + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Found more than one entry.... 
that's a BUG."; + // For lifetimes infinite means also unknown + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else if (isStoRISURLBusy(stori)) { + fileStatusCode = TStatusCode.SRM_FILE_BUSY; + fileStatusExplanation = + "File status is SRM_SPACE_AVAILABLE. SURL lifetime cannot be extend (try with PIN lifetime)"; + // For lifetimes infinite means also unknown + newLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else { // Ok, extend the lifetime of the SURL + // Update the DB with the new lifetime + catalog.trackVolatile(stori.getPFN(), (Calendar) volatileInfo.get(0), newLifetime); + // TODO: return the correct lifetime, i.e. the one which is + // written to the DB. + // TLifeTimeInSeconds writtenLifetime = (TLifeTimeInSeconds) + // volatileInfo.get(1); + + fileStatusCode = TStatusCode.SRM_SUCCESS; + fileStatusExplanation = "Lifetime extended"; + requestFailure = false; + } + } else { // Requested SURL does not exists in the filesystem + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "File does not exist"; + requestSuccess = false; + } + + // Set the file level information to be returned. 
+ TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, fileStatusExplanation); + if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { + ExtendFileLifeTimeCommand.log + .info("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [lifetime:" + newLifetime + + " ] successfully done with: [status:" + fileStatus + "]"); + } else { + ExtendFileLifeTimeCommand.log.error("srmExtendFileLifeTime: <" + guser + + "> Request for [token:" + requestToken + "] for [SURL:" + surl + + "] with [lifetime:" + newLifetime + "] failed with: [status:" + fileStatus + "]"); + } + TSURLLifetimeReturnStatus lifetimeReturnStatus = + new TSURLLifetimeReturnStatus(surl, fileStatus, newLifetime, null); + details.addTSurlReturnStatus(lifetimeReturnStatus); + } + } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { + ExtendFileLifeTimeCommand.log + .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); + } + } + TReturnStatus globalStatus = null; + // Set global status + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "All file requests are failed"); + } else if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, + "All file requests are successfully completed"); + } else { + globalStatus = + new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, "Details are on the file statuses"); + } + return globalStatus; + } + + /** + * Returns true if the status of the SURL of the received StoRI is SRM_SPACE_AVAILABLE, false + * otherwise. This method queries the DB, therefore pay attention to possible performance issues. + * + * @return boolean + */ + private boolean isStoRISURLBusy(StoRI element) { + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + return checker.isSURLBusy(element.getSURL()); + } + + /** + * Extend the PIN lifetime of a SURL. 
The parameter details is filled by this method and contains + * file level information on the execution of the request. + * + * @param requestToken TRequestToken. + * @param newPINLifetime TLifeTimeInSeconds. + * @param arrayOfSURLS ArrayOfSURLs. + * @param guser VomsGridUser. + * @param details ArrayOfTSURLLifetimeReturnStatus. + * @return TReturnStatus. The request status. + * @throws UnknownTokenException + * @throws IllegalArgumentException + */ + private TReturnStatus manageExtendPinLifetime(TRequestToken requestToken, + TLifeTimeInSeconds newPINLifetime, ArrayOfSURLs arrayOfSURLS, GridUserInterface guser, + ArrayOfTSURLLifetimeReturnStatus details) throws IllegalArgumentException { + + if (details == null) { + ExtendFileLifeTimeCommand.log.debug( + "Function manageExtendSURLLifetime, class ExtendFileLifeTimeExecutor: parameter details is NULL"); + } + TReturnStatus globalStatus = null; + List requestSURLsList; + try { + requestSURLsList = getListOfSURLsInTheRequest(guser, requestToken); + } catch (UnknownTokenException e4) { + return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, "Invalid request token"); + } catch (ExpiredTokenException e) { + return CommandHelper.buildStatus(TStatusCode.SRM_REQUEST_TIMED_OUT, "Request expired"); + } catch (AuthzException e) { + return CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + } + if (requestSURLsList.isEmpty()) { + return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, "Invalid request token"); + } + // Once we have the list of SURLs belonging to the request, we must check + // that the SURLs given by the user are consistent, that the resulting + // lifetime could be lower than the one requested (and for this we must read + // the Volatile table of the DB), that the SURLs are not released, aborted, + // expired or suspended and so on... therefore the purpose of all that stuff + // is to return the right information. 
I mean, no PIN lifetime is + // effectively extend, in StoRM the TURL corresponds to the SURL. + boolean requestSuccess = true; + boolean requestFailure = true; + TLifeTimeInSeconds PINLifetime; + TLifeTimeInSeconds dbLifetime = null; + for (int i = 0; i < arrayOfSURLS.size(); i++) { + TSURL surl = arrayOfSURLS.getTSURL(i); + TStatusCode statusOfTheSURL = null; + TStatusCode fileStatusCode; + String fileStatusExplanation; + boolean surlFound = false; + // Check if the current SURL belongs to the request token + for (int j = 0; j < requestSURLsList.size(); j++) { + SURLData surlData = (SURLData) requestSURLsList.get(j); + if (surl.equals(surlData.surl)) { + statusOfTheSURL = surlData.statusCode; + requestSURLsList.remove(j); + surlFound = true; + break; + } + } + try { + if (surlFound) { + ExtendFileLifeTimeCommand.log.debug("Found SURL: " + surl.getSURLString() + " (status: " + + statusOfTheSURL.toString() + ")"); + Namespace namespace = Namespace.getInstance(); + StoRI stori = null; + try { + stori = namespace.resolveStoRIbySURL(surl, guser); + } catch (IllegalArgumentException e) { + log.error("Unable to build StoRI by SURL and user", e); + } catch (Exception e) { + log.info(String.format("Unable to build a stori for surl %s for user %s, %s: %s", surl, + guser, e.getClass().getCanonicalName(), e.getMessage())); + } + if (stori != null) { + LocalFile localFile = stori.getLocalFile(); + if (localFile.exists()) { + VolatileAndJiTCatalog catalog = VolatileAndJiTCatalog.getInstance(); + List volatileInfo = catalog.volatileInfoOn(stori.getPFN()); + + if ((statusOfTheSURL != TStatusCode.SRM_FILE_PINNED) + && (statusOfTheSURL != TStatusCode.SRM_SPACE_AVAILABLE) + && (statusOfTheSURL != TStatusCode.SRM_SUCCESS)) { + fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; + fileStatusExplanation = "No TURL available"; + PINLifetime = null; + requestSuccess = false; + } else if (volatileInfo.size() > 2) { + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation 
= "Found more than one entry.... that's a BUG."; + // For lifetimes infinite means also unknown + PINLifetime = TLifeTimeInSeconds.makeInfinite(); + requestSuccess = false; + } else { // OK, extend the PIN lifetime. + // If the status is success the extension will not take place, + // only in case of empty parameter the current value are + // returned, otherwaise the request must + // fail! + + if ((statusOfTheSURL == TStatusCode.SRM_SUCCESS) && (!newPINLifetime.isEmpty())) { + + fileStatusCode = TStatusCode.SRM_INVALID_REQUEST; + fileStatusExplanation = "No TURL available"; + PINLifetime = null; + requestSuccess = false; + + } else { + + fileStatusCode = TStatusCode.SRM_SUCCESS; + + if (volatileInfo.isEmpty()) { // SURL is permanent + dbLifetime = TLifeTimeInSeconds.makeInfinite(); + } else { + dbLifetime = (TLifeTimeInSeconds) volatileInfo.get(1); + } + if ((!dbLifetime.isInfinite()) && (newPINLifetime.value() > dbLifetime.value())) { + PINLifetime = dbLifetime; + fileStatusExplanation = + "The requested PIN lifetime is greater than the lifetime of the SURL." 
+ + " PIN lifetime is now equal to the lifetime of the SURL."; + } else { + PINLifetime = newPINLifetime; + fileStatusExplanation = "Lifetime extended"; + } + ExtendFileLifeTimeCommand.log.debug("New PIN lifetime is: " + PINLifetime.value() + + "(SURL: " + surl.getSURLString() + ")"); + // TODO: update the RequestSummaryCatalog with the new + // pinLifetime + // it is better to do it only once after the for loop + requestFailure = false; + } + } + } else { // file does not exist in the file system + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "Invalid path"; + PINLifetime = null; + requestSuccess = false; + + } + } else { + log.error("Unable to build StoRI by SURL and user"); + fileStatusCode = TStatusCode.SRM_INTERNAL_ERROR; + fileStatusExplanation = "Unable to build StoRI by SURL and user"; + // For lifetimes infinite means also unknown + PINLifetime = null; + requestSuccess = false; + } + } else { // SURL not found in the DB + ExtendFileLifeTimeCommand.log.debug("SURL: " + surl.getSURLString() + " NOT FOUND!"); + fileStatusCode = TStatusCode.SRM_INVALID_PATH; + fileStatusExplanation = "SURL not found in the request"; + PINLifetime = null; + requestSuccess = false; + } + // Set the file level information to be returned. 
+ TReturnStatus fileStatus = new TReturnStatus(fileStatusCode, fileStatusExplanation); + if (fileStatus.getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { + ExtendFileLifeTimeCommand.log + .info("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [pinlifetime: " + newPINLifetime + + "] successfully done with: [status:" + fileStatus.toString() + "]"); + } else { + ExtendFileLifeTimeCommand.log + .error("srmExtendFileLifeTime: <" + guser + "> Request for [token:" + requestToken + + "] for [SURL:" + surl + "] with [pinlifetime: " + newPINLifetime + + "] failed with: [status:" + fileStatus.toString() + "]"); + } + + TSURLLifetimeReturnStatus lifetimeReturnStatus = + new TSURLLifetimeReturnStatus(surl, fileStatus, dbLifetime, PINLifetime); + details.addTSurlReturnStatus(lifetimeReturnStatus); + } catch (InvalidTSURLLifetimeReturnStatusAttributeException e3) { + ExtendFileLifeTimeCommand.log + .debug("Thrown InvalidTSURLLifetimeReturnStatusAttributeException"); + } + } + + // Set global status + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "All file requests are failed"); + } else if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, + "All file requests are successfully completed"); + } else { + globalStatus = + new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, "Details are on the file statuses"); + } + return globalStatus; + } + + /** + * Returns the list of SURLs and statuses (a List of SURLData) belonging to the request identified + * by the requestToken. 
+ * + * @param requestToken TRequestToken + * @return List + * @throws UnknownTokenException + * @throws IllegalArgumentException + * @throws ExpiredTokenException + */ + private List getListOfSURLsInTheRequest(GridUserInterface user, + TRequestToken requestToken) + throws IllegalArgumentException, UnknownTokenException, ExpiredTokenException { + + List listOfSURLsInfo = new LinkedList(); + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + Map surlStatusMap = checker.getSURLStatuses(user, requestToken); + + if (!(surlStatusMap == null || surlStatusMap.isEmpty())) { + for (Entry surlStatus : surlStatusMap.entrySet()) { + listOfSURLsInfo + .add(new SURLData(surlStatus.getKey(), surlStatus.getValue().getStatusCode())); + } + } + return listOfSURLsInfo; + } + + private void printRequestOutcome(TReturnStatus status, ExtendFileLifeTimeInputData inputData) { + + if (inputData != null) { + if (inputData.getArrayOfSURLs() != null) { + if (inputData.getRequestToken() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getArrayOfSURLs().asStringList()); + } + + } else { + if (inputData.getRequestToken() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + inputData.getRequestToken()); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + } + + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } + + private class SURLData { + + public TSURL surl; + public TStatusCode statusCode; + + public SURLData(TSURL surl, TStatusCode statusCode) { + + this.surl = surl; + this.statusCode = statusCode; + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java 
b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java index 86ef9e6c4..e27a1377b 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java @@ -6,10 +6,10 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TRequestType; diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtGAbortExecutor.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtGAbortExecutor.java index a5d57b41f..1988ab4e8 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtGAbortExecutor.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtGAbortExecutor.java @@ -15,7 +15,7 @@ import it.grid.storm.catalogs.RequestSummaryCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.srm.types.ArrayOfSURLs; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; @@ -42,7 +42,7 @@ public class PtGAbortExecutor implements AbortExecutorInterface { - static Configuration config = Configuration.getInstance(); + static StormConfiguration config = StormConfiguration.getInstance(); private static int maxLoopTimes = PtGAbortExecutor.config.getMaxLoop(); private static final 
Logger log = LoggerFactory diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java index 7769663ed..6565d53e2 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java @@ -14,19 +14,18 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.PtPPersistentChunkData; import it.grid.storm.srm.types.ArrayOfSURLs; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; @@ -73,10 +72,10 @@ public class PtPAbortExecutor implements AbortExecutorInterface { private static final Logger log = LoggerFactory.getLogger(PtPAbortExecutor.class); - static Configuration config = Configuration.getInstance(); + static StormConfiguration config = StormConfiguration.getInstance(); private static int maxLoopTimes = PtPAbortExecutor.config.getMaxLoop(); - private NamespaceInterface namespace; + private Namespace namespace; private final List acceptedStatuses = 
Lists.newArrayList(SRM_SPACE_AVAILABLE, SRM_REQUEST_QUEUED); @@ -84,7 +83,7 @@ public class PtPAbortExecutor implements AbortExecutorInterface { public AbortGeneralOutputData doIt(AbortInputData inputData) { // Used to delete the physical file - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); AbortGeneralOutputData outputData = new AbortGeneralOutputData(); ArrayOfTSURLReturnStatus arrayOfTSurlRetStatus = new ArrayOfTSURLReturnStatus(); @@ -405,7 +404,7 @@ private TSURLReturnStatus manageAuthorizedAbort(GridUserInterface user, TRequest TSURL surl, TReturnStatus status, AbortInputData inputData) { boolean failure = false; - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); TSURLReturnStatus surlReturnStatus = new TSURLReturnStatus(); surlReturnStatus.setSurl(surl); @@ -558,7 +557,7 @@ private TSURLReturnStatus manageAuthorizedAbort(GridUserInterface user, TRequest // in the JitCatalog. In this way the next time the GarbageCollector // will wake up, it will remove the entry as expired. // The file is already removed, but the garbage collection is - // smart enought to manage the case. + // smart enough to manage the case. 
// Change status to aborted if (failure) { @@ -574,9 +573,7 @@ private TSURLReturnStatus manageAuthorizedAbort(GridUserInterface user, TRequest surlReturnStatus .setStatus(new TReturnStatus(SRM_SUCCESS, "File request successfully aborted.")); try { - NamespaceDirector.getNamespace() - .resolveVFSbyLocalFile(fileToRemove) - .decreaseUsedSpace(sizeToRemove); + Namespace.getInstance().resolveVFSbyLocalFile(fileToRemove).decreaseUsedSpace(sizeToRemove); } catch (NamespaceException e) { log.error(e.getMessage()); surlReturnStatus.getStatus() diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java index b071f5fbb..4c2304ae1 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PutDoneCommand.java @@ -28,17 +28,19 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.common.types.PFN; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.ea.StormEA; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.model.VirtualFS; @@ -68,6 +70,19 @@ public class PutDoneCommand extends DataTransferCommand implements Command { private static final String SRM_COMMAND = "srmPutDone"; + /** + * boolean that indicates 
if setting ACL on the 0-size file is necessary or not + */ + protected boolean setupACLs = true; + + public PutDoneCommand() { + + if (StormConfiguration.getInstance().getPTPSkipACLSetup()) { + setupACLs = false; + log.debug("Skipping ACL setup on PTP as requested by configuration."); + } + } + private ManageFileTransferRequestFilesInputData inputDataSanityCheck(InputData inputData) throws PutDoneCommandException { @@ -91,9 +106,9 @@ private ManageFileTransferRequestFilesInputData inputDataSanityCheck(InputData i } return data; } - - private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, - boolean atLeastOneFailure, boolean atLeastOneAborted) { + + private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, boolean atLeastOneFailure, + boolean atLeastOneAborted) { if (atLeastOneSuccess) { if (!atLeastOneFailure && !atLeastOneAborted) { @@ -105,7 +120,7 @@ private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, if (atLeastOneFailure) { if (!atLeastOneAborted) { return buildStatus(SRM_FAILURE, "All file requests are failed"); - } + } return buildStatus(SRM_FAILURE, "Some file requests are failed, the others are aborted"); } @@ -118,32 +133,33 @@ private TReturnStatus buildGlobalStatus(boolean atLeastOneSuccess, return buildStatus(SRM_INTERNAL_ERROR, "Request Failed, no surl status recognized, retry."); } - private void markSURLsReadyForRead(TRequestToken requestToken, List spaceAvailableSURLs) throws PutDoneCommandException { - + private void markSURLsReadyForRead(TRequestToken requestToken, List spaceAvailableSURLs) + throws PutDoneCommandException { + if (spaceAvailableSURLs.isEmpty()) { log.debug("markSURLsReadyForRead: empty spaceAvailableSURLs"); return; } - + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); try { - + checker.markSURLsReadyForRead(requestToken, spaceAvailableSURLs); } catch (IllegalArgumentException e) { - + log.error("PutDone: Unexpected IllegalArgumentException '{}'", e.getMessage()); - 
throw new PutDoneCommandException(CommandHelper.buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry."), e); + throw new PutDoneCommandException( + CommandHelper.buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry."), e); } } - + private ArrayOfTSURLReturnStatus loadSURLsStatuses( - ManageFileTransferRequestFilesInputData inputData) - throws PutDoneCommandException { - + ManageFileTransferRequestFilesInputData inputData) throws PutDoneCommandException { + TRequestToken requestToken = inputData.getRequestToken(); List listOfSURLs = inputData.getArrayOfSURLs().getArrayList(); - + ArrayOfTSURLReturnStatus surlsStatuses = null; try { @@ -156,83 +172,78 @@ private ArrayOfTSURLReturnStatus loadSURLsStatuses( } catch (IllegalArgumentException e) { - log.error("PutDone: Unexpected IllegalArgumentException: {}", - e.getMessage(), e); + log.error("PutDone: Unexpected IllegalArgumentException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, "Request Failed, retry.")); } catch (RequestUnknownException e) { - log.info( - "PutDone: Invalid request token and surl. RequestUnknownException: {}", - e.getMessage(), e); + log.info("PutDone: Invalid request token and surl. RequestUnknownException: {}", + e.getMessage(), e); throw new PutDoneCommandException( buildStatus(SRM_INVALID_REQUEST, "Invalid request token and surls")); } catch (UnknownTokenException e) { - log.info("PutDone: Invalid request token. UnknownTokenException: {}", - e.getMessage(), e); + log.info("PutDone: Invalid request token. 
UnknownTokenException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_INVALID_REQUEST, "Invalid request token")); } catch (ExpiredTokenException e) { - log.info("PutDone: The request is expired: ExpiredTokenException: {}", - e.getMessage(), e); + log.info("PutDone: The request is expired: ExpiredTokenException: {}", e.getMessage(), e); throw new PutDoneCommandException(buildStatus(SRM_REQUEST_TIMED_OUT, "Request expired")); } return surlsStatuses; } - - + + /** - * Implements the srmPutDone. Used to notify the SRM that the client completed - * a file transfer to the TransferURL in the allocated space (by a - * PrepareToPut). + * Implements the srmPutDone. Used to notify the SRM that the client completed a file transfer to + * the TransferURL in the allocated space (by a PrepareToPut). */ public OutputData execute(InputData absData) { log.debug("PutDone: Started."); - + TReturnStatus globalStatus = null; ArrayOfTSURLReturnStatus surlsStatuses = null; - + boolean atLeastOneSuccess = false; boolean atLeastOneFailure = false; boolean atLeastOneAborted = false; ManageFileTransferRequestFilesInputData inputData = null; try { - + inputData = inputDataSanityCheck(absData); - + } catch (PutDoneCommandException e) { printRequestOutcome(e.getReturnStatus()); return new ManageFileTransferOutputData(e.getReturnStatus()); } - GridUserInterface user = inputData instanceof IdentityInputData - ? ((IdentityInputData) inputData).getUser() : null; + GridUserInterface user = + inputData instanceof IdentityInputData ? 
((IdentityInputData) inputData).getUser() : null; TRequestToken requestToken = inputData.getRequestToken(); List spaceAvailableSURLs = Lists.newArrayList(); - + try { - + surlsStatuses = loadSURLsStatuses(inputData); - + } catch (PutDoneCommandException e) { - + printRequestOutcome(e.getReturnStatus(), inputData); - return new ManageFileTransferOutputData(e.getReturnStatus()); + return new ManageFileTransferOutputData(e.getReturnStatus()); } - - + + for (TSURLReturnStatus surlStatus : surlsStatuses.getArray()) { - + TReturnStatus newStatus; TReturnStatus currentStatus = surlStatus.getStatus(); - + switch (currentStatus.getStatusCode()) { case SRM_SPACE_AVAILABLE: @@ -240,7 +251,7 @@ public OutputData execute(InputData absData) { spaceAvailableSURLs.add(surlStatus.getSurl()); // DO PutDone try { - executePutDone(surlStatus.getSurl(), user); + executePutDone(surlStatus.getSurl(), user, setupACLs); } catch (PutDoneCommandException e) { newStatus = e.getReturnStatus(); atLeastOneFailure = true; @@ -272,24 +283,23 @@ public OutputData execute(InputData absData) { surlsStatuses.updateStatus(surlStatus, newStatus); } - + try { - + markSURLsReadyForRead(requestToken, spaceAvailableSURLs); } catch (PutDoneCommandException e) { - + printRequestOutcome(e.getReturnStatus(), inputData); - return new ManageFileTransferOutputData(e.getReturnStatus()); + return new ManageFileTransferOutputData(e.getReturnStatus()); } - + log.debug("PutDone: Computing final global status ..."); - globalStatus = buildGlobalStatus(atLeastOneSuccess, atLeastOneFailure, - atLeastOneAborted); - + globalStatus = buildGlobalStatus(atLeastOneSuccess, atLeastOneFailure, atLeastOneAborted); + log.debug("PutDone: Finished with status {}", globalStatus); printRequestOutcome(globalStatus, inputData); - + return new ManageFileTransferOutputData(globalStatus, surlsStatuses); } @@ -299,31 +309,30 @@ private static void printRequestOutcome(TReturnStatus status) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, 
status); } - private static void printRequestOutcome(TReturnStatus status, ManageFileTransferRequestFilesInputData inputData) { + private static void printRequestOutcome(TReturnStatus status, + ManageFileTransferRequestFilesInputData inputData) { Preconditions.checkNotNull(inputData); Preconditions.checkNotNull(status); CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, - inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); + inputData.getRequestToken(), inputData.getArrayOfSURLs().asStringList()); } private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, - TRequestToken requestToken, List inputSURLs) - throws RequestUnknownException { + TRequestToken requestToken, List inputSURLs) throws RequestUnknownException { - ArrayOfTSURLReturnStatus returnStatuses = new ArrayOfTSURLReturnStatus( - inputSURLs.size()); + ArrayOfTSURLReturnStatus returnStatuses = new ArrayOfTSURLReturnStatus(inputSURLs.size()); SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); - Map surlsStatuses = checker.getSURLStatuses(user, - requestToken, inputSURLs); + Map surlsStatuses = + checker.getSURLStatuses(user, requestToken, inputSURLs); if (surlsStatuses.isEmpty()) { log.info("PutDone: No one of the requested surls found for the provided token"); throw new RequestUnknownException( - "No one of the requested surls found for the provided token"); + "No one of the requested surls found for the provided token"); } TReturnStatus status = null; @@ -337,7 +346,7 @@ private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, } else { log.debug("PutDone: SURL '{}' NOT found in the DB!", surl); status = new TReturnStatus(SRM_INVALID_PATH, - "SURL does not refer to an existing file for the specified request token"); + "SURL does not refer to an existing file for the specified request token"); } TSURLReturnStatus surlRetStatus = new TSURLReturnStatus(surl, status); 
returnStatuses.addTSurlReturnStatus(surlRetStatus); @@ -345,100 +354,113 @@ private ArrayOfTSURLReturnStatus loadSURLsStatus(GridUserInterface user, return returnStatuses; } - public static boolean executePutDone(TSURL surl) throws PutDoneCommandException { - return executePutDone(surl, null); + public static boolean executePutDone(TSURL surl) throws PutDoneCommandException { + return executePutDone(surl, null, StormConfiguration.getInstance().getPTPSkipACLSetup()); + } + + public static boolean executePutDone(TSURL surl, GridUserInterface user, boolean setupACLs) + throws PutDoneCommandException { + + Preconditions.checkNotNull(surl, "Null SURL received"); + + log.debug("Executing PutDone for SURL: {}", surl.getSURLString()); + + String userStr = user == null ? "Anonymous" : user.toString(); + StoRI stori = null; + + try { + + stori = Namespace.getInstance().resolveStoRIbySURL(surl, user); + + } catch (IllegalArgumentException e) { + + log.error(String.format("User %s is unable to build a stori for surl %s, %s: %s", userStr, + surl, e.getClass().getName(), e.getMessage())); + throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, e.getMessage()), e); + + } catch (Exception e) { + + log.info(String.format("User %s is unable to build a stori for surl %s, %s: %s", userStr, + surl, e.getClass().getName(), e.getMessage()), e); + return false; + + } + + // 1- if the SURL is volatile update the entry in the Volatile table + if (VolatileAndJiTCatalog.getInstance().exists(stori.getPFN())) { + try { + VolatileAndJiTCatalog.getInstance().setStartTime(stori.getPFN(), Calendar.getInstance()); + } catch (Exception e) { + // impossible because of the "exists" check + } + } + + if (setupACLs) { + + log.debug("PutDone: JiT case, removing ACEs on SURL: " + surl.toString()); + // Retrieve the PFN of the SURL parents + List storiParentsList = stori.getParents(); + List pfnParentsList = Lists.newArrayList(); + + for (StoRI parentStoRI : storiParentsList) { + 
pfnParentsList.add(parentStoRI.getPFN()); + } + LocalUser localUser = null; + try { + if (user != null) { + localUser = user.getLocalUser(); + } + } catch (CannotMapUserException e) { + log.warn("PutDone: Unable to get the local user for user {}. CannotMapUserException: {}", + user, e.getMessage(), e); + } + + if (stori.hasJustInTimeACLs()) { + if (localUser != null) { + VolatileAndJiTCatalog.getInstance().expirePutJiTs(stori.getPFN(), localUser); + } else { + VolatileAndJiTCatalog.getInstance().removeAllJiTsOn(stori.getPFN()); + } + } else { + unsetAoTAcl(stori, localUser); + } + } + + // 3- compute the checksum and store it in an extended attribute + LocalFile localFile = stori.getLocalFile(); + + VirtualFS vfs = null; + try { + vfs = Namespace.getInstance().resolveVFSbyLocalFile(localFile); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + return false; + } + + // 4- Tape stuff management. + if (vfs.getStorageClassType().isTapeEnabled()) { + String fileAbosolutePath = localFile.getAbsolutePath(); + StormEA.removePinned(fileAbosolutePath); + StormEA.setPremigrate(fileAbosolutePath); } - public static boolean executePutDone(TSURL surl, GridUserInterface user) - throws PutDoneCommandException { - - Preconditions.checkNotNull(surl, "Null SURL received"); - - log.debug("Executing PutDone for SURL: {}", surl.getSURLString()); - - String userStr = user == null ? 
"Anonymous" : user.toString(); - StoRI stori = null; - - try { - - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl, user); - - } catch (IllegalArgumentException e) { - - log.error( - String.format("User %s is unable to build a stori for surl %s, %s: %s", - userStr, surl, e.getClass().getName(), e.getMessage())); - throw new PutDoneCommandException(buildStatus(SRM_INTERNAL_ERROR, e.getMessage()), e); - - } catch (Exception e) { - - log.info( - String.format("User %s is unable to build a stori for surl %s, %s: %s", - userStr, surl, e.getClass().getName(), e.getMessage()), e); - return false; - - } - - // 1- if the SURL is volatile update the entry in the Volatile table - if (VolatileAndJiTCatalog.getInstance().exists(stori.getPFN())) { - try { - VolatileAndJiTCatalog.getInstance().setStartTime(stori.getPFN(), - Calendar.getInstance()); - } catch (Exception e) { - // impossible because of the "exists" check - } - } - - // 2- JiTs must me removed from the TURL - if (stori.hasJustInTimeACLs()) { - log.debug("PutDone: JiT case, removing ACEs on SURL: " + surl.toString()); - // Retrieve the PFN of the SURL parents - List storiParentsList = stori.getParents(); - List pfnParentsList = Lists.newArrayList(); - - for (StoRI parentStoRI : storiParentsList) { - pfnParentsList.add(parentStoRI.getPFN()); - } - LocalUser localUser = null; - try { - if (user != null) { - localUser = user.getLocalUser(); - } - } catch (CannotMapUserException e) { - log.warn( - "PutDone: Unable to get the local user for user {}. 
CannotMapUserException: {}", - user, e.getMessage(), e); - } - if (localUser != null) { - VolatileAndJiTCatalog.getInstance().expirePutJiTs(stori.getPFN(), - localUser); - } else { - VolatileAndJiTCatalog.getInstance().removeAllJiTsOn(stori.getPFN()); - } - } - - // 3- compute the checksum and store it in an extended attribute - LocalFile localFile = stori.getLocalFile(); - - VirtualFS vfs = null; - try { - vfs = NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - return false; - } - - // 4- Tape stuff management. - if (vfs.getStorageClassType().isTapeEnabled()) { - String fileAbosolutePath = localFile.getAbsolutePath(); - StormEA.removePinned(fileAbosolutePath); - StormEA.setPremigrate(fileAbosolutePath); - } - - // 5- Update UsedSpace into DB - vfs.increaseUsedSpace(localFile.getSize()); - - return true; - } + // 5- Update UsedSpace into DB + vfs.increaseUsedSpace(localFile.getSize()); + + return true; + } + + private static boolean unsetAoTAcl(StoRI fileStori, LocalUser localUser) { + log.debug("SrmMkdir: Removing AoT ACL {} to user {} for file: '{}'", localUser, + fileStori.getAbsolutePath()); + try { + AclManagerFS.getInstance().removeGroupPermission(fileStori.getLocalFile(), localUser); + } catch (IllegalArgumentException e) { + log.error("Unable to remove user traverse permission on parent file. 
" + + "IllegalArgumentException: {}", e.getMessage(), e); + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java index 1e60f2216..3c817e48e 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/ReleaseFilesCommand.java @@ -9,7 +9,7 @@ import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.ea.StormEA; import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.StoRI; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; @@ -38,30 +38,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * - * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. - *

- * - * - * Authors: - * - * @author=lucamag luca.magnoniATcnaf.infn.it - * @author Alberto Forti - * - * @date = Oct 10, 2008 - * - */ - public class ReleaseFilesCommand extends DataTransferCommand implements Command { - private static final Logger log = LoggerFactory - .getLogger(ReleaseFilesCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReleaseFilesCommand.class); private static final String SRM_COMMAND = "srmReleaseFiles"; - private static final EnumSet PINNED_OR_SUCCESS = EnumSet.of( - TStatusCode.SRM_SUCCESS, TStatusCode.SRM_FILE_PINNED); + private static final EnumSet PINNED_OR_SUCCESS = + EnumSet.of(TStatusCode.SRM_SUCCESS, TStatusCode.SRM_FILE_PINNED); public ReleaseFilesCommand() { @@ -79,8 +63,7 @@ public TRequestToken getTokenFromInputData(InputData inputData) { public List getSURLListFromInputData(InputData inputData) { if (inputDataHasSURLArray(inputData)) { - return ((ManageFileTransferFilesInputData) inputData).getArrayOfSURLs() - .getArrayList(); + return ((ManageFileTransferFilesInputData) inputData).getArrayOfSURLs().getArrayList(); } return null; } @@ -96,30 +79,28 @@ private List toStringList(List surls) { public boolean validInputData(InputData inputData) { return (inputData instanceof ManageFileTransferRequestFilesInputData) - || (inputData instanceof ManageFileTransferFilesInputData) - || (inputData instanceof ManageFileTransferRequestInputData); + || (inputData instanceof ManageFileTransferFilesInputData) + || (inputData instanceof ManageFileTransferRequestInputData); } public boolean inputDataHasToken(InputData inputData) { return (inputData instanceof ManageFileTransferRequestFilesInputData) - || (inputData instanceof ManageFileTransferRequestInputData); + || (inputData instanceof ManageFileTransferRequestInputData); } public boolean inputDataHasSURLArray(InputData inputData) { return (inputData instanceof ManageFileTransferRequestFilesInputData) - || (inputData instanceof ManageFileTransferFilesInputData); + 
|| (inputData instanceof ManageFileTransferFilesInputData); } public OutputData handleNullInputData(InputData inputData) { - log.error("ReleaseFiles: Invalid input parameters specified: inputData=" - + inputData); + log.error("ReleaseFiles: Invalid input parameters specified: inputData=" + inputData); ManageFileTransferOutputData outputData = new ManageFileTransferOutputData( - CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, - "Empty request parametes")); + CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, "Empty request parametes")); logRequestOutcome(outputData.getReturnStatus(), inputData); @@ -127,14 +108,12 @@ public OutputData handleNullInputData(InputData inputData) { } - public OutputData handleInvalidRequest(InputData in, - IllegalArgumentException e) { + public OutputData handleInvalidRequest(InputData in, IllegalArgumentException e) { log.warn(e.getMessage(), e); ManageFileTransferOutputData outputData = new ManageFileTransferOutputData( - CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, - "Internal error: " + e.getMessage())); + CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "Internal error: " + e.getMessage())); logRequestOutcome(outputData.getReturnStatus(), in); @@ -145,9 +124,8 @@ public OutputData handleNoSURLsFound(InputData in) { log.info("No SURLs found in the DB. Request failed"); - TReturnStatus returnStatus = CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_REQUEST, - "No SURLs found matching user, input request token or list of SURLs."); + TReturnStatus returnStatus = CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, + "No SURLs found matching user, input request token or list of SURLs."); logRequestOutcome(returnStatus, in); @@ -161,9 +139,8 @@ private boolean isAnonymousRequest(InputData inputData) { } /** - * Does a ReleaseFiles. Used to release pins on the previously requested - * "copies" (or "state") of the SURL. This function normally follows a - * srmPrepareToGet or srmBringOnline functions. 
+ * Does a ReleaseFiles. Used to release pins on the previously requested "copies" (or "state") of + * the SURL. This function normally follows a srmPrepareToGet or srmBringOnline functions. */ public OutputData execute(InputData inputData) { @@ -177,7 +154,7 @@ public OutputData execute(InputData inputData) { if (!validInputData(inputData)) { throw new IllegalArgumentException( - "Release files: invalid argument type: " + inputData.getClass()); + "Release files: invalid argument type: " + inputData.getClass()); } Map surlStatuses = null; @@ -195,13 +172,11 @@ public OutputData execute(InputData inputData) { try { if (token == null) { - surlStatuses = checker.getPinnedSURLsForUser(user, - getSURLListFromInputData(inputData)); + surlStatuses = checker.getPinnedSURLsForUser(user, getSURLListFromInputData(inputData)); } else { - surlStatuses = checker - .getSURLStatuses(user, getTokenFromInputData(inputData), + surlStatuses = checker.getSURLStatuses(user, getTokenFromInputData(inputData), getSURLListFromInputData(inputData)); } @@ -217,14 +192,14 @@ public OutputData execute(InputData inputData) { return handleNoSURLsFound(inputData); } - ArrayOfTSURLReturnStatus surlReturnStatuses = prepareSurlsReturnStatus( - surlStatuses, getSURLListFromInputData(inputData)); + ArrayOfTSURLReturnStatus surlReturnStatuses = + prepareSurlsReturnStatus(surlStatuses, getSURLListFromInputData(inputData)); List surlToRelease = extractSurlToRelease(surlReturnStatuses); if (surlToRelease.isEmpty()) { - TReturnStatus returnStatus = CommandHelper.buildStatus( - TStatusCode.SRM_FAILURE, "No files released"); + TReturnStatus returnStatus = + CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "No files released"); logRequestOutcome(returnStatus, inputData); @@ -250,8 +225,8 @@ private OutputData handleAuthzError(InputData inputData, AuthzException e) { log.error(e.getMessage()); - TReturnStatus returnStatus = CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + 
TReturnStatus returnStatus = + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); logRequestOutcome(returnStatus, inputData); @@ -259,7 +234,7 @@ private OutputData handleAuthzError(InputData inputData, AuthzException e) { } private TReturnStatus buildStatus(InputData inputData, - ArrayOfTSURLReturnStatus surlReturnStatuses) { + ArrayOfTSURLReturnStatus surlReturnStatuses) { boolean atLeastOneReleased = false; boolean atLeastOneFailure = false; @@ -268,8 +243,7 @@ private TReturnStatus buildStatus(InputData inputData, printSurlOutcome(returnStatus, inputData); - if (returnStatus.getStatus().getStatusCode() - .equals(TStatusCode.SRM_SUCCESS)) { + if (returnStatus.getStatus().getStatusCode().equals(TStatusCode.SRM_SUCCESS)) { atLeastOneReleased = true; @@ -283,22 +257,19 @@ private TReturnStatus buildStatus(InputData inputData, if (atLeastOneReleased) { if (atLeastOneFailure) { return CommandHelper.buildStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Check files status for details"); + "Check files status for details"); } else { - return CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, - "Files released"); + return CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, "Files released"); } } else { - return CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, - "No files released"); + return CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "No files released"); } } - private ArrayOfTSURLReturnStatus prepareSurlsReturnStatus( - Map statuses, List surlsInRequest) { + private ArrayOfTSURLReturnStatus prepareSurlsReturnStatus(Map statuses, + List surlsInRequest) { - ArrayOfTSURLReturnStatus surlReturnStatuses = new ArrayOfTSURLReturnStatus( - statuses.size()); + ArrayOfTSURLReturnStatus surlReturnStatuses = new ArrayOfTSURLReturnStatus(statuses.size()); Collection surls; @@ -317,12 +288,10 @@ private ArrayOfTSURLReturnStatus prepareSurlsReturnStatus( returnStatus = prepareStatus(rs.getStatusCode()); } else { - returnStatus = 
CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, - "Invalid SURL"); + returnStatus = CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid SURL"); } - surlReturnStatuses.addTSurlReturnStatus(CommandHelper.buildStatus(surl, - returnStatus)); + surlReturnStatuses.addTSurlReturnStatus(CommandHelper.buildStatus(surl, returnStatus)); } return surlReturnStatuses; @@ -335,18 +304,16 @@ private TReturnStatus prepareStatus(TStatusCode status) { } return CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, - "Not released because it is not pinned"); + "Not released because it is not pinned"); } - private List extractSurlToRelease( - ArrayOfTSURLReturnStatus surlReturnStatuses) { + private List extractSurlToRelease(ArrayOfTSURLReturnStatus surlReturnStatuses) { LinkedList surlToRelease = new LinkedList(); for (TSURLReturnStatus returnStatus : surlReturnStatuses.getArray()) { - if (TStatusCode.SRM_SUCCESS.equals(returnStatus.getStatus() - .getStatusCode())) { + if (TStatusCode.SRM_SUCCESS.equals(returnStatus.getStatus().getStatusCode())) { surlToRelease.add(returnStatus.getSurl()); } @@ -356,8 +323,7 @@ private List extractSurlToRelease( } /** - * Removes the Extended Attribute "pinned" from SURLs belonging to a - * filesystem with tape support. + * Removes the Extended Attribute "pinned" from SURLs belonging to a filesystem with tape support. 
* * @param surlToRelease */ @@ -369,13 +335,12 @@ private void removePinneExtendedAttribute(List surlToRelease) { try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + stori = Namespace.getInstance().resolveStoRIbySURL(surl); } catch (Throwable e) { - log.warn(String.format( - "UNEXPECTED: Unable to build a stori for surl %s: %s", surl, - e.getMessage())); + log.warn(String.format("UNEXPECTED: Unable to build a stori for surl %s: %s", surl, + e.getMessage())); continue; } @@ -387,11 +352,10 @@ private void removePinneExtendedAttribute(List surlToRelease) { } } - private void printSurlOutcome(TSURLReturnStatus surlStatus, - InputData inputData) { + private void printSurlOutcome(TSURLReturnStatus surlStatus, InputData inputData) { - CommandHelper.printSurlOutcome(SRM_COMMAND, log, surlStatus.getStatus(), - inputData, surlStatus.getSurl()); + CommandHelper.printSurlOutcome(SRM_COMMAND, log, surlStatus.getStatus(), inputData, + surlStatus.getSurl()); } protected void logRequestOutcome(TReturnStatus status, InputData id) { @@ -402,8 +366,7 @@ protected void logRequestOutcome(TReturnStatus status, InputData id) { if (surls == null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, id, token); } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, id, token, - toStringList(surls)); + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, id, token, toStringList(surls)); } } } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java index b865920de..7e34b8e80 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/LsCommand.java @@ -20,7 +20,6 @@ import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.checksum.ChecksumManager; import it.grid.storm.common.SRMConstants; -import 
it.grid.storm.common.types.SizeUnit; import it.grid.storm.filesystem.FSException; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; @@ -30,9 +29,8 @@ import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfSURLs; @@ -84,13 +82,13 @@ public class LsCommand extends DirectoryCommand implements Command { private static final String SRM_COMMAND = "srmLs"; - private final NamespaceInterface namespace; + private final Namespace namespace; private boolean atLeastOneInputSURLIsDir; public LsCommand() { - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); } /** @@ -619,10 +617,10 @@ private void populateDetailFromFS(StoRI element, TMetaDataPathDetail elementDeta TSizeInBytes size = TSizeInBytes.makeEmpty(); try { if (!(localElement.isDirectory())) { - size = TSizeInBytes.make(localElement.getExactSize(), SizeUnit.BYTES); + size = TSizeInBytes.make(localElement.getExactSize()); log.debug("srmLs: Extracting size for {}. 
Size: {}", localElement.getPath(), size); } else { - size = TSizeInBytes.make(0, SizeUnit.BYTES); + size = TSizeInBytes.make(0); } } catch (InvalidTSizeAttributesException ex) { log.error("srmLs: Unable to create the size of file.", ex); diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java index 32f313532..fafbb7c76 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java @@ -30,16 +30,15 @@ import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.namespace.model.ACLEntry; @@ -104,14 +103,14 @@ public class MkdirCommand extends DirectoryCommand implements Command { private static final String SRM_COMMAND = "SrmMkdir"; - private final NamespaceInterface namespace; - private final Configuration configuration; + private final Namespace namespace; + private final StormConfiguration configuration; private final AclManager aclManager; public MkdirCommand() { - namespace = NamespaceDirector.getNamespace(); - configuration = Configuration.getInstance(); + namespace = 
Namespace.getInstance(); + configuration = StormConfiguration.getInstance(); aclManager = AclManagerFS.getInstance(); } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java index d4d07ffdd..68449a266 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java @@ -4,6 +4,11 @@ */ package it.grid.storm.synchcall.command.directory; +import java.util.Arrays; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; @@ -17,9 +22,8 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.space.SpaceHelper; @@ -38,581 +42,501 @@ import it.grid.storm.synchcall.data.directory.MvInputData; import it.grid.storm.synchcall.data.directory.MvOutputData; -import java.util.Arrays; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project This class implements the SrmMv - * Command. 
- * - * @author lucamag - * @date May 28, 2008 - */ - public class MvCommand extends DirectoryCommand implements Command { public static final Logger log = LoggerFactory.getLogger(MvCommand.class); - private static final String SRM_COMMAND = "SrmMv"; - private final NamespaceInterface namespace; - - public MvCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmMv functionality. - * - * @param inputData - * Contains information about input data for Mv request. - * @return outputData Contains output data - */ - public OutputData execute(InputData data) { - - log.debug("srmMv: Start execution."); - MvOutputData outputData = new MvOutputData(); - MvInputData inputData = (MvInputData) data; - - /** - * Validate MvInputData. The check is done at this level to separate - * internal StoRM logic from xmlrpc specific operation. - */ - - if ((inputData == null) || (inputData.getFromSURL() == null) - || (inputData.getToSURL() == null)) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, - "Invalid parameter specified.")); - log.warn("srmMv: Request failed with [status: {}]", - outputData.getStatus()); - - return outputData; - } - - TSURL fromSURL = inputData.getFromSURL(); - - if (fromSURL.isEmpty()) { - log.warn("srmMv: unable to perform the operation, empty fromSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSURL toSURL = inputData.getToSURL(); - - if (toSURL.isEmpty()) { - log.error("srmMv: unable to perform the operation, empty toSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI fromStori = null; - try { - if (inputData instanceof IdentityInputData) { - try { - fromStori = 
namespace.resolveStoRIbySURL(fromSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - } - } else { - try { - fromStori = namespace.resolveStoRIbySURL(fromSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.warn("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_REQUEST, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI toStori = null;; - try { - if (inputData instanceof IdentityInputData) { - try { - toStori = namespace.resolveStoRIbySURL(toSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - try { - toStori = namespace.resolveStoRIbySURL(toSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage(),e); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, - "Unable to build StoRI by destination SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fromStori); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (inputData instanceof IdentityInputData) { - isSpaceAuthorized = spaceAuth.authorize( - ((IdentityInputData) inputData).getUser(), SRMSpaceRequest.MV); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MV); - } - if (!isSpaceAuthorized) { - log.debug("srmMv: User not authorized to perform srmMv on SA: {}", token); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, - ": User not authorized to perform srmMv on SA: " + token)); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (fromStori.getLocalFile().getPath() - 
.compareTo(toStori.getLocalFile().getPath()) == 0) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, - "Source SURL and target SURL are the same file.")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (toStori.getLocalFile().exists()) { - if (toStori.getLocalFile().isDirectory()) { - try { - toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); - } catch (IllegalArgumentException e) { - log.debug("srmMv : Unable to build StoRI for SURL {}. {}", - toSURL, e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidTSURLAttributesException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - log.debug("srmMv : destination SURL {} already exists.", toSURL); - outputData.setStatus(CommandHelper - .buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, - "destination SURL already exists!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - - AuthzDecision sourceDecision; - if (inputData instanceof IdentityInputData) { - sourceDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, - fromStori, toStori); - } else { - sourceDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_source, fromStori, toStori); - } - AuthzDecision destinationDecision; - if (inputData instanceof IdentityInputData) { - destinationDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, - fromStori, toStori); - } else { - destinationDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_dest, fromStori, toStori); - } - TReturnStatus returnStatus; - if ((sourceDecision.equals(AuthzDecision.PERMIT)) - && (destinationDecision.equals(AuthzDecision.PERMIT))) { - - log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", - DataHelper.getRequestor(inputData), - fromStori.getPFN(), - toStori.getPFN()); - - returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); - if (returnStatus.isSRM_SUCCESS()) { - LocalUser user = null; - if (inputData instanceof IdentityInputData) { - try { - user = ((IdentityInputData) inputData).getUser().getLocalUser(); - } catch (CannotMapUserException e) { - log - .warn("srmMv: user mapping error {}", e.getMessage()); - - if (log.isDebugEnabled()){ - log.error(e.getMessage(),e); - } - - returnStatus - .extendExplaination("unable to set user acls on the destination file"); - } - } - if (user != null) { - setAcl(fromStori, toStori, user); - } else { - setAcl(fromStori, toStori); - } - } else { - log.warn("srmMv: <{}> Request for [fromSURL={}; toSURL={}] failed with [status: {}]", - DataHelper.getRequestor(inputData), - fromSURL, - toSURL, - returnStatus); - } - } else { - - String errorMsg = "Authorization error"; - - if (sourceDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to create and/or write the destination file"; - } else { - if (destinationDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to read and/or delete the source file"; - } else { - errorMsg = - "User is neither authorized to read and/or delete the source file " - + "nor to create and/or write the destination file"; - } - } - - returnStatus = - CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - errorMsg); - } - outputData.setStatus(returnStatus); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, - InputData inputData) throws IllegalArgumentException, - InvalidTSURLAttributesException, UnapprochableSurlException, - NamespaceException, InvalidSURLException { - - StoRI toStori; - String toSURLString = toSURL.getSURLString(); - if (!(toSURLString.endsWith("/"))) { - toSURLString += 
"/"; - } - toSURLString += fromStori.getFilename(); - log.debug("srmMv: New toSURL: {}", toSURLString); - if (inputData instanceof IdentityInputData) { - toStori = namespace.resolveStoRIbySURL( - TSURL.makeFromStringValidate(toSURLString), - ((IdentityInputData) inputData).getUser()); - } else { - toStori = namespace.resolveStoRIbySURL(TSURL - .makeFromStringValidate(toSURLString)); - } - return toStori; - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI) { - - try { - AclManagerFS.getInstance().moveHttpsPermissions( - oldFileStoRI.getLocalFile(), newFileStoRI.getLocalFile()); - } catch (IllegalArgumentException e) { - log - .error("Unable to move permissions from the old to the new file.{}", - e.getMessage(), e); - } - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI, - LocalUser localUser) { - - setAcl(oldFileStoRI, newFileStoRI); - if (newFileStoRI.hasJustInTimeACLs()) { - // JiT - try { - AclManagerFS.getInstance().grantHttpsUserPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant user read and write permission on file. {}", - e.getMessage(), - e); - } - } else { - // AoT - try { - AclManagerFS.getInstance().grantHttpsGroupPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant group read and write permission on file. {}" - ,e.getMessage(),e); - } - } - } - - /** - * Split PFN , recursive creation is not supported, as reported at page 16 of - * Srm v2.1 spec. 
- * - * @param user - * VomsGridUser - * @param LocalFile - * fromFile - * @param LocalFile - * toFile - * @return TReturnStatus - */ - private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { - - boolean creationDone; - - String explanation = ""; - TStatusCode statusCode = TStatusCode.EMPTY; - - LocalFile fromFile = fromStori.getLocalFile(); - LocalFile toParent = toFile.getParentFile(); - - /* - * Controllare che File sorgente esiste Esiste directory destinazione(che - * esista e sia directory) Non esiste file deestinazione - */ - - boolean sourceExists = false; - boolean targetDirExists = false; - boolean targetFileExists = false; - - if (fromFile != null) { - sourceExists = fromFile.exists(); - } - - if (toParent != null) { - targetDirExists = toParent.exists() && toParent.isDirectory(); - } - - if (toFile != null) { - targetFileExists = toFile.exists(); - } - - if (sourceExists && targetDirExists && !targetFileExists) { - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - if(checker.isSURLBusy(fromStori.getSURL())){ - log - .debug("srmMv request failure: fromSURL is busy."); - explanation = "There is an active SrmPrepareToPut on from SURL."; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Check if there is an active SrmPrepareToGet on the source SURL. In that - * case SrmMv() fails with SRM_FILE_BUSY. - */ - - if (checker.isSURLPinned(fromStori.getSURL())){ - log - .debug("SrmMv: requests fails because the source SURL is being used from other requests."); - explanation = "There is an active SrmPrepareToGet on from SURL"; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Perform the SrmMv() operation. 
- */ - creationDone = fromFile.renameTo(toFile.getPath()); - - if (creationDone) { - log.debug("SrmMv: Request success!"); - explanation = "SURL moved with success"; - statusCode = TStatusCode.SRM_SUCCESS; - } else { - log.debug("SrmMv: Requests fails because the path is invalid."); - explanation = "Invalid path"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } - - } else { - if (!sourceExists) { // and it is a file - log - .debug("SrmMv: request fails because the source SURL does not exists!"); - explanation = "Source SURL does not exists!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (!targetDirExists) { - log - .debug("SrmMv: request fails because the target directory does not exitts."); - explanation = "Target directory does not exits!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (targetFileExists) { - log.debug("SrmMv: request fails because the target SURL exists."); - explanation = "Target SURL exists!"; - statusCode = TStatusCode.SRM_DUPLICATION_ERROR; - } else { - log.debug("SrmMv request failure! 
That is a BUG!"); - explanation = "That is a bug!"; - statusCode = TStatusCode.SRM_INTERNAL_ERROR; - } - } - } - } - - return CommandHelper.buildStatus(statusCode, explanation); - } - - private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { - - if (inputData != null) { - if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { - CommandHelper.printRequestOutcome( - SRM_COMMAND, - log, - status, - inputData, - Arrays.asList(new String[] { inputData.getFromSURL().toString(), - inputData.getFromSURL().toString() })); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + private static final String SRM_COMMAND = "SrmMv"; + private final Namespace namespace; + + public MvCommand() { + + namespace = Namespace.getInstance(); + + } + + /** + * Method that provide SrmMv functionality. + * + * @param inputData Contains information about input data for Mv request. + * @return outputData Contains output data + */ + public OutputData execute(InputData data) { + + log.debug("srmMv: Start execution."); + MvOutputData outputData = new MvOutputData(); + MvInputData inputData = (MvInputData) data; + + /** + * Validate MvInputData. The check is done at this level to separate internal StoRM logic from + * XMLRPC specific operation. 
+ */ + + if ((inputData == null) || (inputData.getFromSURL() == null) + || (inputData.getToSURL() == null)) { + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "Invalid parameter specified.")); + log.warn("srmMv: Request failed with [status: {}]", outputData.getStatus()); + + return outputData; + } + + TSURL fromSURL = inputData.getFromSURL(); + + if (fromSURL.isEmpty()) { + log.warn("srmMv: unable to perform the operation, empty fromSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + TSURL toSURL = inputData.getToSURL(); + + if (toSURL.isEmpty()) { + log.error("srmMv: unable to perform the operation, empty toSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI fromStori = null; + try { + if (inputData instanceof IdentityInputData) { + try { + fromStori = + namespace.resolveStoRIbySURL(fromSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + } + } else { + try { + fromStori = namespace.resolveStoRIbySURL(fromSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.warn("srmMv: Unable to build StoRI by SURL: {}. 
{}", fromSURL, e.getMessage()); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI toStori = null;; + try { + if (inputData instanceof IdentityInputData) { + try { + toStori = namespace.resolveStoRIbySURL(toSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + try { + toStori = namespace.resolveStoRIbySURL(toSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. 
{}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL, e.getMessage(), e); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by destination SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fromStori); + SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); + + boolean isSpaceAuthorized; + if (inputData instanceof IdentityInputData) { + isSpaceAuthorized = + spaceAuth.authorize(((IdentityInputData) inputData).getUser(), SRMSpaceRequest.MV); + } else { + isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MV); + } + if (!isSpaceAuthorized) { + log.debug("srmMv: User not authorized to perform srmMv on SA: {}", token); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, + ": User not authorized to perform srmMv on SA: " + token)); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (fromStori.getLocalFile().getPath().compareTo(toStori.getLocalFile().getPath()) == 0) { + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, + "Source SURL and target SURL are the same file.")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (toStori.getLocalFile().exists()) { + if 
(toStori.getLocalFile().isDirectory()) { + try { + toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); + } catch (IllegalArgumentException e) { + log.debug("srmMv : Unable to build StoRI for SURL {}. {}", toSURL, e.getMessage()); + + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidTSURLAttributesException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + log.debug("srmMv : destination SURL {} already exists.", toSURL); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, + "destination SURL already exists!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + + AuthzDecision sourceDecision; + if (inputData instanceof IdentityInputData) { + sourceDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, fromStori, + toStori); + } else { + sourceDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_source, fromStori, toStori); + } + AuthzDecision destinationDecision; + if (inputData instanceof IdentityInputData) { + destinationDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, fromStori, + toStori); + } else { + destinationDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_dest, fromStori, toStori); + } + TReturnStatus returnStatus; + if ((sourceDecision.equals(AuthzDecision.PERMIT)) + && (destinationDecision.equals(AuthzDecision.PERMIT))) { + + log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", + DataHelper.getRequestor(inputData), fromStori.getPFN(), toStori.getPFN()); + + returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); + if (returnStatus.isSRM_SUCCESS()) { + LocalUser user = null; + if (inputData instanceof IdentityInputData) { + try { + user = ((IdentityInputData) inputData).getUser().getLocalUser(); + } catch (CannotMapUserException e) { + log.warn("srmMv: user mapping error {}", e.getMessage()); + + if (log.isDebugEnabled()) { + log.error(e.getMessage(), e); + } + + returnStatus.extendExplaination("unable to set user acls on the destination file"); + } + } + if (user != null) { + setAcl(fromStori, toStori, user); + } else { + setAcl(fromStori, toStori); + } + } else { + log.warn("srmMv: <{}> Request for [fromSURL={}; toSURL={}] failed with [status: {}]", + DataHelper.getRequestor(inputData), fromSURL, toSURL, returnStatus); + } + } else { + + String errorMsg = "Authorization error"; + + if (sourceDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to create and/or write the destination file"; + } else { + if (destinationDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to read and/or delete the source file"; + } else { + errorMsg = "User is neither authorized to read and/or delete the source file " + + "nor to create and/or write the destination file"; + } + } + + returnStatus = CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, errorMsg); + } + outputData.setStatus(returnStatus); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, InputData inputData) + throws IllegalArgumentException, InvalidTSURLAttributesException, UnapprochableSurlException, + NamespaceException, InvalidSURLException { + + StoRI toStori; + String toSURLString = toSURL.getSURLString(); + if (!(toSURLString.endsWith("/"))) { + toSURLString += "/"; + } + toSURLString += 
fromStori.getFilename(); + log.debug("srmMv: New toSURL: {}", toSURLString); + if (inputData instanceof IdentityInputData) { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString), + ((IdentityInputData) inputData).getUser()); + } else { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString)); + } + return toStori; + } + + private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI) { + + try { + AclManagerFS.getInstance() + .moveHttpsPermissions(oldFileStoRI.getLocalFile(), newFileStoRI.getLocalFile()); + } catch (IllegalArgumentException e) { + log.error("Unable to move permissions from the old to the new file.{}", e.getMessage(), e); + } + } + + private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI, LocalUser localUser) { + + setAcl(oldFileStoRI, newFileStoRI); + if (newFileStoRI.hasJustInTimeACLs()) { + // JiT + try { + AclManagerFS.getInstance() + .grantHttpsUserPermission(newFileStoRI.getLocalFile(), localUser, + FilesystemPermission.ReadWrite); + } catch (IllegalArgumentException e) { + log.error("Unable to grant user read and write permission on file. {}", e.getMessage(), e); + } + } else { + // AoT + try { + AclManagerFS.getInstance() + .grantHttpsGroupPermission(newFileStoRI.getLocalFile(), localUser, + FilesystemPermission.ReadWrite); + } catch (IllegalArgumentException e) { + log.error("Unable to grant group read and write permission on file. {}", e.getMessage(), e); + } + } + } + + /** + * Split PFN , recursive creation is not supported, as reported at page 16 of Srm v2.1 spec. 
+ * + * @param user VomsGridUser + * @param LocalFile fromFile + * @param LocalFile toFile + * @return TReturnStatus + */ + private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { + + boolean creationDone; + + String explanation = ""; + TStatusCode statusCode = TStatusCode.EMPTY; + + LocalFile fromFile = fromStori.getLocalFile(); + LocalFile toParent = toFile.getParentFile(); + + /* + * Controllare che File sorgente esiste Esiste directory destinazione(che esista e sia + * directory) Non esiste file deestinazione + */ + + boolean sourceExists = false; + boolean targetDirExists = false; + boolean targetFileExists = false; + + if (fromFile != null) { + sourceExists = fromFile.exists(); + } + + if (toParent != null) { + targetDirExists = toParent.exists() && toParent.isDirectory(); + } + + if (toFile != null) { + targetFileExists = toFile.exists(); + } + + if (sourceExists && targetDirExists && !targetFileExists) { + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + if (checker.isSURLBusy(fromStori.getSURL())) { + log.debug("srmMv request failure: fromSURL is busy."); + explanation = "There is an active SrmPrepareToPut on from SURL."; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Check if there is an active SrmPrepareToGet on the source SURL. In that case SrmMv() fails + * with SRM_FILE_BUSY. + */ + + if (checker.isSURLPinned(fromStori.getSURL())) { + log.debug( + "SrmMv: requests fails because the source SURL is being used from other requests."); + explanation = "There is an active SrmPrepareToGet on from SURL"; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Perform the SrmMv() operation. 
+ */ + creationDone = fromFile.renameTo(toFile.getPath()); + + if (creationDone) { + log.debug("SrmMv: Request success!"); + explanation = "SURL moved with success"; + statusCode = TStatusCode.SRM_SUCCESS; + } else { + log.debug("SrmMv: Requests fails because the path is invalid."); + explanation = "Invalid path"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } + + } else { + if (!sourceExists) { // and it is a file + log.debug("SrmMv: request fails because the source SURL does not exists!"); + explanation = "Source SURL does not exists!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (!targetDirExists) { + log.debug("SrmMv: request fails because the target directory does not exitts."); + explanation = "Target directory does not exits!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (targetFileExists) { + log.debug("SrmMv: request fails because the target SURL exists."); + explanation = "Target SURL exists!"; + statusCode = TStatusCode.SRM_DUPLICATION_ERROR; + } else { + log.debug("SrmMv request failure! 
That is a BUG!"); + explanation = "That is a bug!"; + statusCode = TStatusCode.SRM_INTERNAL_ERROR; + } + } + } + } + + return CommandHelper.buildStatus(statusCode, explanation); + } + + private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { + + if (inputData != null) { + if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, Arrays.asList( + new String[] {inputData.getFromSURL().toString(), inputData.getFromSURL().toString()})); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java index ec98d352b..2e90b980c 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java @@ -19,9 +19,8 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; @@ -49,24 +48,15 @@ public RmException(TStatusCode code, String message) { } } - -/** - * This class is part of the StoRM project. 
Copyright: Copyright (c) 2008 Company: INFN-CNAF and - * ICTP/EGRID project - * - * @author lucamag - * @date May 27, 2008 - */ - public class RmCommand implements Command { private static final String SRM_COMMAND = "srmRm"; private static final Logger log = LoggerFactory.getLogger(RmCommand.class); - private final NamespaceInterface namespace; + private final Namespace namespace; public RmCommand() { - namespace = NamespaceDirector.getNamespace(); + namespace = Namespace.getInstance(); } @@ -192,7 +182,7 @@ private TReturnStatus removeFile(TSURL surl, GridUserInterface user, RmInputData returnStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, "File removed"); try { - NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile).decreaseUsedSpace(fileSize); + Namespace.getInstance().resolveVFSbyLocalFile(localFile).decreaseUsedSpace(fileSize); } catch (NamespaceException e) { log.error(e.getMessage()); returnStatus.extendExplaination("Unable to decrease used space: " + e.getMessage()); diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java index e20166a11..8f08797e4 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java @@ -17,9 +17,8 @@ import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; import it.grid.storm.srm.types.SRMCommandException; @@ -47,285 +46,262 @@ public RmdirException(TStatusCode code, String message) { } } + class TSize { - - private long size; - - 
TSize(long size) { - this.size = size; - } - - public void add(long n) { - size += n; - } - - public void dec(long n) { - size -= n; - } - - public long get() { - return size; - } - + + private long size; + + TSize(long size) { + this.size = size; + } + + public void add(long n) { + size += n; + } + + public void dec(long n) { + size -= n; + } + + public long get() { + return size; + } + } -/** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project - * - * @author lucamag - * @date May 27, 2008 - */ public class RmdirCommand extends DirectoryCommand implements Command { - - public static final Logger log = LoggerFactory.getLogger(RmdirCommand.class); - private static final String SRM_COMMAND = "srmRmdir"; - private final NamespaceInterface namespace; - - public RmdirCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmRmdir functionality. - * - * @param inputData - * Contains information about input data for Rmdir request. - * @return OutputData Contains output data - */ - public OutputData execute(InputData data) { - - RmdirOutputData outputData = null; - log.debug("SrmRmdir: Start execution."); - checkInputData(data); - outputData = doRmdir((RmdirInputData) data); - log.debug("srmRmdir return status: {}", outputData.getStatus()); - printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); - return outputData; - - } - - private RmdirOutputData doRmdir(RmdirInputData data) { - - TSURL surl = null; - GridUserInterface user = null; - StoRI stori = null; - TReturnStatus returnStatus = null; - boolean recursion = false; - TSize size = new TSize(0); - - try { - surl = getSURL(data); - user = getUser(data); - recursion = isRecursive(data); - stori = resolveStoRI(surl, user); - checkUserAuthorization(stori, user); - log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", - userToString(user), stori.getPFN(), recursion); - returnStatus = removeFolder(stori.getLocalFile(), recursion, size); - log.debug("srmRmdir: decrease used space of {} bytes", size.get()); - try { - decreaseUsedSpace(stori.getLocalFile(), size.get()); - } catch (NamespaceException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus.extendExplaination("Unable to decrease used space: " - + e.getMessage()); - } - } catch (RmdirException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus = e.getReturnStatus(); - } - - log.debug("srmRmdir: returned status is {}", returnStatus); - return new RmdirOutputData(returnStatus); - } - - private void checkInputData(InputData data) - throws IllegalArgumentException { - - if (data == null) { - throw new IllegalArgumentException("Invalid input data: NULL"); - } - if (!(data instanceof RmdirInputData)) { - throw new IllegalArgumentException("Invalid input data type"); - } - } - - private StoRI resolveStoRI(TSURL surl, GridUserInterface user) - throws RmdirException { - - String formatStr = "Unable to build a stori for surl {} for user {}: {}"; - try { - return namespace.resolveStoRIbySURL(surl, user); - } catch (UnapprochableSurlException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - e.getMessage()); - } catch (NamespaceException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } catch (InvalidSURLException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); - } catch (IllegalArgumentException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } - } - - private boolean isAnonymous(GridUserInterface user) { - - 
return (user == null); - } - - private String userToString(GridUserInterface user) { - - return isAnonymous(user) ? "anonymous" : user.getDn(); - } - - private void checkUserAuthorization(StoRI stori, GridUserInterface user) - throws RmdirException { - - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RMD); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RMD); - } - if (!isSpaceAuthorized) { - log.debug("srmRmdir: User not authorized to perform srmRmdir request " - + "on the storage area: {}", token); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory on the storage area " - + token); - } - - AuthzDecision decision; - if (isAnonymous(user)) { - decision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.RMD, stori.getStFN()); - } else { - decision = AuthzDirector.getPathAuthz().authorize(user, - SRMFileRequest.RMD, stori); - } - if (!decision.equals(AuthzDecision.PERMIT)) { - log.debug("srmRmdir: User is not authorized to delete the directory"); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory"); - } - return; - } - - private GridUserInterface getUser(InputData data) { - - if (data instanceof IdentityInputData) { - return ((IdentityInputData) data).getUser(); - } - return null; - } - - private TSURL getSURL(RmdirInputData data) throws RmdirException { - - TSURL surl = ((RmdirInputData) data).getSurl(); - if (surl == null) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is NULL"); - } - if (surl.isEmpty()) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is empty"); - } - return surl; - } - - private boolean isRecursive(RmdirInputData 
data) { - - return data.getRecursive().booleanValue(); - } - - private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) - throws NamespaceException { - - NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile) - .decreaseUsedSpace(sizeToRemove); - } - - private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) - throws RmdirException { - - /* - * Check if dir exists and is a directory, if recursion is enabled when - * directory is not empty, etc... - */ - - if (!dir.exists()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, - "Directory does not exists"); - } - if (!dir.isDirectory()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); - } - if (!recursive && (dir.listFiles().length > 0)) { - return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, - "Directory is not empty"); - } - - if (recursive) { - LocalFile[] list = dir.listFiles(); - log.debug("srmRmdir: removing {} content", dir); - for (LocalFile element : list) { - log.debug("srmRmdir: removing {}", element); - if (element.isDirectory()) { - removeFolder(element, recursive, size); - } else { - removeFile(element, size); - } - } - } - log.debug("srmRmdir: removing {}", dir); - removeEmptyDirectory(dir, size); - return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); - } - - private void removeEmptyDirectory(LocalFile directory, TSize size) - throws RmdirException { - - removeFile(directory, size); - } - - private void removeFile(LocalFile file, TSize size) throws RmdirException { - - long fileSize = file.length(); - if (!file.delete()) { - log.error("srmRmdir: Unable to delete {}", file); - throw new RmdirException(TStatusCode.SRM_FAILURE, - "Unable to delete " + file.getAbsolutePath()); - } - size.add(fileSize); - } - - private void printRequestOutcome(TReturnStatus status, - RmdirInputData inputData) { - - if (inputData != null) { - if (inputData.getSurl() != null) { - 
CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, - Arrays.asList(inputData.getSurl().toString())); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } - -} \ No newline at end of file + private static final String SRM_COMMAND = "srmRmdir"; + private final Namespace namespace; + + public RmdirCommand() { + + namespace = Namespace.getInstance(); + + } + + /** + * Method that provide SrmRmdir functionality. + * + * @param inputData Contains information about input data for Rmdir request. + * @return OutputData Contains output data + */ + public OutputData execute(InputData data) { + + RmdirOutputData outputData = null; + log.debug("SrmRmdir: Start execution."); + checkInputData(data); + outputData = doRmdir((RmdirInputData) data); + log.debug("srmRmdir return status: {}", outputData.getStatus()); + printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); + return outputData; + + } + + private RmdirOutputData doRmdir(RmdirInputData data) { + + TSURL surl = null; + GridUserInterface user = null; + StoRI stori = null; + TReturnStatus returnStatus = null; + boolean recursion = false; + TSize size = new TSize(0); + + try { + surl = getSURL(data); + user = getUser(data); + recursion = isRecursive(data); + stori = resolveStoRI(surl, user); + checkUserAuthorization(stori, user); + log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", userToString(user), + stori.getPFN(), recursion); + returnStatus = removeFolder(stori.getLocalFile(), recursion, size); + log.debug("srmRmdir: decrease used space of {} bytes", size.get()); + try { + decreaseUsedSpace(stori.getLocalFile(), size.get()); + } catch (NamespaceException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus.extendExplaination("Unable to decrease used space: " + e.getMessage()); + } + } catch (RmdirException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus = e.getReturnStatus(); + } + + log.debug("srmRmdir: returned status is {}", returnStatus); + return new RmdirOutputData(returnStatus); + } + + private void checkInputData(InputData data) throws IllegalArgumentException { + + if (data == null) { + throw new IllegalArgumentException("Invalid input data: NULL"); + } + if (!(data instanceof RmdirInputData)) { + throw new IllegalArgumentException("Invalid input data type"); + } + } + + private StoRI resolveStoRI(TSURL surl, GridUserInterface user) throws RmdirException { + + String formatStr = "Unable to build a stori for surl {} for user {}: {}"; + try { + return namespace.resolveStoRIbySURL(surl, user); + } catch (UnapprochableSurlException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + } catch (NamespaceException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } catch (InvalidSURLException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); + } catch (IllegalArgumentException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } + } + + private boolean isAnonymous(GridUserInterface user) { + + return 
(user == null); + } + + private String userToString(GridUserInterface user) { + + return isAnonymous(user) ? "anonymous" : user.getDn(); + } + + private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws RmdirException { + + TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); + SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); + + boolean isSpaceAuthorized; + if (isAnonymous(user)) { + isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RMD); + } else { + isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RMD); + } + if (!isSpaceAuthorized) { + log.debug( + "srmRmdir: User not authorized to perform srmRmdir request " + "on the storage area: {}", + token); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, + "User is not authorized to remove the directory on the storage area " + token); + } + + AuthzDecision decision; + if (isAnonymous(user)) { + decision = + AuthzDirector.getPathAuthz().authorizeAnonymous(SRMFileRequest.RMD, stori.getStFN()); + } else { + decision = AuthzDirector.getPathAuthz().authorize(user, SRMFileRequest.RMD, stori); + } + if (!decision.equals(AuthzDecision.PERMIT)) { + log.debug("srmRmdir: User is not authorized to delete the directory"); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, + "User is not authorized to remove the directory"); + } + return; + } + + private GridUserInterface getUser(InputData data) { + + if (data instanceof IdentityInputData) { + return ((IdentityInputData) data).getUser(); + } + return null; + } + + private TSURL getSURL(RmdirInputData data) throws RmdirException { + + TSURL surl = ((RmdirInputData) data).getSurl(); + if (surl == null) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is NULL"); + } + if (surl.isEmpty()) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is empty"); + } + return surl; + } + + private boolean isRecursive(RmdirInputData data) { + + return 
data.getRecursive().booleanValue(); + } + + private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) throws NamespaceException { + + Namespace.getInstance().resolveVFSbyLocalFile(localFile).decreaseUsedSpace(sizeToRemove); + } + + private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) + throws RmdirException { + + /* + * Check if dir exists and is a directory, if recursion is enabled when directory is not empty, + * etc... + */ + + if (!dir.exists()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Directory does not exists"); + } + if (!dir.isDirectory()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); + } + if (!recursive && (dir.listFiles().length > 0)) { + return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, "Directory is not empty"); + } + + if (recursive) { + LocalFile[] list = dir.listFiles(); + log.debug("srmRmdir: removing {} content", dir); + for (LocalFile element : list) { + log.debug("srmRmdir: removing {}", element); + if (element.isDirectory()) { + removeFolder(element, recursive, size); + } else { + removeFile(element, size); + } + } + } + log.debug("srmRmdir: removing {}", dir); + removeEmptyDirectory(dir, size); + return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); + } + + private void removeEmptyDirectory(LocalFile directory, TSize size) throws RmdirException { + + removeFile(directory, size); + } + + private void removeFile(LocalFile file, TSize size) throws RmdirException { + + long fileSize = file.length(); + if (!file.delete()) { + log.error("srmRmdir: Unable to delete {}", file); + throw new RmdirException(TStatusCode.SRM_FAILURE, + "Unable to delete " + file.getAbsolutePath()); + } + size.add(fileSize); + } + + private void printRequestOutcome(TReturnStatus status, RmdirInputData inputData) { + + if (inputData != null) { + if (inputData.getSurl() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, 
inputData, + Arrays.asList(inputData.getSurl().toString())); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } + +} diff --git a/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java b/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java index 9a85013c9..313a3be03 100644 --- a/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/discovery/PingCommand.java @@ -5,7 +5,8 @@ package it.grid.storm.synchcall.command.discovery; import it.grid.storm.Constants; -import it.grid.storm.config.Configuration; +import it.grid.storm.catalogs.TapeRecallCatalog; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.persistence.model.TapeRecallTO; import it.grid.storm.srm.types.ArrayOfTExtraInfo; import it.grid.storm.srm.types.InvalidTExtraInfoAttributeException; @@ -17,7 +18,6 @@ import it.grid.storm.synchcall.data.OutputData; import it.grid.storm.synchcall.data.discovery.PingInputData; import it.grid.storm.synchcall.data.discovery.PingOutputData; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,16 +30,6 @@ import java.util.Map.Entry; import java.util.Properties; -/** - * This class is part of the StoRM project. 
Copyright: Copyright (c) 2008 Company: INFN-CNAF and - * ICTP/EGRID project - * - * @author lucamag - * @author Alberto Forti - * @date May 28, 2008 - * - */ - public class PingCommand extends DiscoveryCommand implements Command { public static final Logger log = LoggerFactory.getLogger(PingCommand.class); @@ -109,7 +99,7 @@ private Properties loadProperties() { Properties properties = new Properties(); - Configuration config = Configuration.getInstance(); + StormConfiguration config = StormConfiguration.getInstance(); String configurationPATH = config.namespaceConfigPath(); String pingPropertiesFileName = config.getPingValuesPropertiesFilename(); String propertiesFile = configurationPATH + File.separator + pingPropertiesFileName; @@ -235,7 +225,7 @@ private ArrayOfTExtraInfo test_takeover(String param) { try { // Retrieve the Task - List tasks = new TapeRecallCatalog().takeoverNTasksWithDoubles(numbOfTask); + List tasks = TapeRecallCatalog.getInstance().takeoverNTasksWithDoubles(numbOfTask); if (tasks != null) { // Build the response diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java index 255202a4c..04c7e1521 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java @@ -31,189 +31,160 @@ import it.grid.storm.synchcall.data.space.GetSpaceMetaDataOutputData; import it.grid.storm.synchcall.data.space.IdentityGetSpaceMetaDataInputData; -/** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project - * - * This class represents the GetSpaceMetaDataManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. 
- * - * @author lucamag - * @date May 29, 2008 - * - */ - public class GetSpaceMetaDataCommand extends SpaceCommand implements Command { - public static final Logger log = LoggerFactory - .getLogger(GetSpaceMetaDataCommand.class); - - private ReservedSpaceCatalog catalog = null; - - private static final String SRM_COMMAND = "srmGetSpaceMetaData"; - - /** - * Constructor. Bind the Executor with ReservedSpaceCatalog - */ - - public GetSpaceMetaDataCommand() { - - catalog = new ReservedSpaceCatalog(); - } - - /** - * - * @param data - * GetSpaceMetaDataInputData - * @return GetSpaceMetaDataOutputData - */ - public OutputData execute(InputData indata) { - - log.debug(""); - log.debug(" Updating SA with GPFS quotas results"); - GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); - - IdentityGetSpaceMetaDataInputData data; - if (indata instanceof IdentityInputData) { - data = (IdentityGetSpaceMetaDataInputData) indata; - } else { - GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (GetSpaceMetaDataInputData) indata); - return outputData; - } - int errorCount = 0; - ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); - TReturnStatus globalStatus = null; - - TMetaDataSpace metadata = null; - - for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { - StorageSpaceData spaceData = null; - try { - spaceData = catalog.getStorageSpace(token); - } catch (TransferObjectDecodingException e) { - log.error("Error getting storage space data for token {}. 
{}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error building space data from row DB data", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - - } catch (DataAccessException e) { - log.error("Error getting storage space data for token {}. {}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error retrieving row space token data from DB", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - } - if (spaceData != null) { - if (!spaceData.isInitialized()) { - log.warn("Uninitialized storage data found for token {}", token); - metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, - "Storage Space not initialized yet", data.getUser()); - errorCount++; - } else { - try { - metadata = new TMetaDataSpace(spaceData); - } catch (InvalidTMetaDataSpaceAttributeException e) { - log.error("Metadata error. {}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } catch (InvalidTSizeAttributesException e) { - log.error("Metadata error. 
{}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } - } - } else { - log.warn("Unable to retrieve space data for token {}.",token); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INVALID_REQUEST, "Space Token not found", - data.getUser()); - errorCount++; - } - arrayData.addTMetaDataSpace(metadata); - } - - boolean requestSuccess = (errorCount == 0); - boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); - - if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); - - log.info("srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "done succesfully with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "No valid space tokens"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "failed with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Check space tokens statuses for details"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "partially done with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } - } - - GetSpaceMetaDataOutputData response = null; - try { - response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); - } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { - log.error(e.getMessage(),e); - } - return response; - } - - private TMetaDataSpace createFailureMetadata(TSpaceToken token, - TStatusCode statusCode, String message, GridUserInterface user) { - - TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); - metadata.setSpaceToken(token); - - try { - metadata.setStatus(new 
TReturnStatus(statusCode, message)); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(),e); - } - - return metadata; - } - - private void printRequestOutcome(TReturnStatus status, - GetSpaceMetaDataInputData inputData) { - - if (inputData != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + public static final Logger log = LoggerFactory.getLogger(GetSpaceMetaDataCommand.class); + + private ReservedSpaceCatalog catalog = null; + + private static final String SRM_COMMAND = "srmGetSpaceMetaData"; + + public GetSpaceMetaDataCommand() { + + catalog = ReservedSpaceCatalog.getInstance(); + } + + /** + * + * @param data GetSpaceMetaDataInputData + * @return GetSpaceMetaDataOutputData + */ + public OutputData execute(InputData indata) { + + log.debug(""); + log.debug(" Updating SA with GPFS quotas results"); + GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); + + IdentityGetSpaceMetaDataInputData data; + if (indata instanceof IdentityInputData) { + data = (IdentityGetSpaceMetaDataInputData) indata; + } else { + GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (GetSpaceMetaDataInputData) indata); + return outputData; + } + int errorCount = 0; + ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); + TReturnStatus globalStatus = null; + + TMetaDataSpace metadata = null; + + for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { + StorageSpaceData spaceData = null; + try { + spaceData = catalog.getStorageSpace(token); + } catch (TransferObjectDecodingException e) { + log.error("Error getting storage space data for token {}. 
{}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building space data from row DB data", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + + } catch (DataAccessException e) { + log.error("Error getting storage space data for token {}. {}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error retrieving row space token data from DB", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + } + if (spaceData != null) { + if (!spaceData.isInitialized()) { + log.warn("Uninitialized storage data found for token {}", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, + "Storage Space not initialized yet", data.getUser()); + errorCount++; + } else { + try { + metadata = new TMetaDataSpace(spaceData); + } catch (InvalidTMetaDataSpaceAttributeException e) { + log.error("Metadata error. {}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } catch (InvalidTSizeAttributesException e) { + log.error("Metadata error. 
{}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } + } + } else { + log.warn("Unable to retrieve space data for token {}.", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_INVALID_REQUEST, + "Space Token not found", data.getUser()); + errorCount++; + } + arrayData.addTMetaDataSpace(metadata); + } + + boolean requestSuccess = (errorCount == 0); + boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); + + if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "done succesfully with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "No valid space tokens"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "failed with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + + globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, + "Check space tokens statuses for details"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "partially done with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } + } + + GetSpaceMetaDataOutputData response = null; + try { + response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); + } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { + log.error(e.getMessage(), e); + } + return response; + } + + private TMetaDataSpace createFailureMetadata(TSpaceToken token, TStatusCode statusCode, + String message, GridUserInterface user) { + + TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); + metadata.setSpaceToken(token); + + try { + metadata.setStatus(new 
TReturnStatus(statusCode, message)); + } catch (IllegalArgumentException e) { + log.error(e.getMessage(), e); + } + + return metadata; + } + + private void printRequestOutcome(TReturnStatus status, GetSpaceMetaDataInputData inputData) { + + if (inputData != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java index 4e488e2c7..78fd3194c 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java @@ -22,18 +22,6 @@ import it.grid.storm.synchcall.data.space.IdentityGetSpaceTokensInputData; import it.grid.storm.synchcall.data.space.GetSpaceTokensOutputData; -/** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project * Execute the GetSpaceTokens - * request. 
- * - * @author lucamag - * @author Alberto Forti - * - * @date May 29, 2008 - * - */ - public class GetSpaceTokensCommand extends SpaceCommand implements Command { public static final Logger log = LoggerFactory @@ -44,7 +32,7 @@ public class GetSpaceTokensCommand extends SpaceCommand implements Command { public GetSpaceTokensCommand() { - catalog = new ReservedSpaceCatalog(); + catalog = ReservedSpaceCatalog.getInstance(); }; public OutputData execute(InputData data) { diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java index 2c27474dc..025824182 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java @@ -26,31 +26,21 @@ import org.slf4j.LoggerFactory; /** - * This class represents the ReleaseSpaceManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. - * - * @author Magnoni Luca - * @author Cnaf -INFN Bologna - * @date - * @version 1.0 + * This class represents the ReleaseSpaceManager Class. This class has a reseveSpace method that + * perform all operation needed to satisfy a SRM space release request. 
*/ public class ReleaseSpaceCommand extends SpaceCommand implements Command { private final ReservedSpaceCatalog catalog; - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(ReleaseSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReleaseSpaceCommand.class); private static final String SRM_COMMAND = "srmReleaseSpace"; public ReleaseSpaceCommand() { - catalog = new ReservedSpaceCatalog(); + catalog = ReservedSpaceCatalog.getInstance(); }; public OutputData execute(InputData indata) { @@ -60,20 +50,16 @@ public OutputData execute(InputData indata) { if (indata instanceof IdentityInputData) { inputData = (IdentityReleaseSpaceInputData) indata; } else { - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReleaseSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReleaseSpaceInputData) indata); return outputData; } TReturnStatus returnStatus = null; - if ((inputData == null) - || ((inputData != null) && (inputData.getSpaceToken() == null))) { + if ((inputData == null) || ((inputData != null) && (inputData.getSpaceToken() == null))) { log.error("Empty space token."); - returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "SpaceToken is empty."); + returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, "SpaceToken is empty."); outputData.setStatus(returnStatus); return outputData; } @@ -82,11 +68,11 @@ public OutputData execute(InputData indata) { if (user == null) { log.debug("Null user credentials."); returnStatus = new TReturnStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential"); + "Unable to get user credential"); outputData.setStatus(returnStatus); - 
log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -100,16 +86,16 @@ public OutputData execute(InputData indata) { try { data = catalog.getStorageSpace(inputData.getSpaceToken()); } catch (Throwable e) { - log.error("Error fetching data for space token {}. {}", - inputData.getSpaceToken(), e.getMessage(), e); + log.error("Error fetching data for space token {}. {}", inputData.getSpaceToken(), + e.getMessage(), e); explanation = "Error building space data from row DB data."; statusCode = TStatusCode.SRM_INTERNAL_ERROR; returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -120,8 +106,8 @@ public OutputData execute(InputData indata) { returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -157,13 +143,12 @@ public OutputData execute(InputData indata) { if (returnStatus.isSRM_SUCCESS()) { log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] succesfully done " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); - + + "with: [status: {}]", user, inputData.getSpaceToken(), 
returnStatus); + } else { log.error("srmReleaseSpace: <" + user + "> Request for [spacetoken:" - + inputData.getSpaceToken() + "] for failed with: [status:" - + returnStatus + "]"); + + inputData.getSpaceToken() + "] for failed with: [status:" + returnStatus + "]"); } @@ -172,14 +157,12 @@ public OutputData execute(InputData indata) { /** * - * @param user - * GridUserInterface - * @param data - * StorageSpaceData + * @param user GridUserInterface + * @param data StorageSpaceData * @return TReturnStatus */ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, - GridUserInterface user) { + GridUserInterface user) { String spaceFileName; PFN pfn = data.getSpaceFileName(); @@ -193,19 +176,17 @@ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Released."); } else { return new TReturnStatus(TStatusCode.SRM_INTERNAL_ERROR, - "Space removed, but spaceToken was not found in the DB"); + "Space removed, but spaceToken was not found in the DB"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, - "Space can not be removed by StoRM!"); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "Space can not be removed by StoRM!"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); } } - private void printRequestOutcome(TReturnStatus status, - ReleaseSpaceInputData indata) { + private void printRequestOutcome(TReturnStatus status, ReleaseSpaceInputData indata) { if (indata != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, indata); diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java index 73a4a3eef..81f1c74c9 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java +++ 
b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java @@ -11,10 +11,8 @@ import it.grid.storm.acl.AclManager; import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.SizeUnit; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.filesystem.ReservationException; @@ -22,13 +20,13 @@ import it.grid.storm.griduser.CannotMapUserException; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.model.VirtualFS; import it.grid.storm.namespace.naming.NamespaceUtil; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.space.StorageSpaceData; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -59,10 +57,9 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { private ReservedSpaceCatalog catalog; - private static final Logger log = LoggerFactory - .getLogger(ReserveSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReserveSpaceCommand.class); - private NamespaceInterface namespace; + private Namespace namespace; private static final String SRM_COMMAND = "srmReserveSpace"; @@ -70,14 +67,15 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { String explanation = null; private void logRequestSuccess(GridUserInterface user, TSizeInBytes desSize, 
- TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TReturnStatus status) { - - log.info("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "succesfully done with: [status: {}]", user, desSize, guarSize, - lifetime, rpinfo, status); + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, TRetentionPolicyInfo rpinfo, + TReturnStatus status) { + + log.info( + "srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "succesfully done with: [status: {}]", + user, desSize, guarSize, lifetime, rpinfo, status); } private void logRequestFailure(TStatusCode code, String explanation) { @@ -87,29 +85,27 @@ private void logRequestFailure(TStatusCode code, String explanation) { } private void logRequestFailure(GridUserInterface user, TSizeInBytes desSize, - TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TStatusCode code, String explanation) { + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, TRetentionPolicyInfo rpinfo, + TStatusCode code, String explanation) { TReturnStatus status = new TReturnStatus(code, explanation); log.error("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, - status); + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, status); } public ReserveSpaceCommand() { - namespace = NamespaceDirector.getNamespace(); - catalog = new ReservedSpaceCatalog(); + namespace = Namespace.getInstance(); + 
catalog = ReservedSpaceCatalog.getInstance(); } /** * Method that provide space reservation for srmReserveSpace request. * - * @param data - * Contain information about data procived in SRM request. + * @param data Contain information about data procived in SRM request. * @return SpaceResOutputData that contain all SRM return parameter. * @todo Implement this it.grid.storm.synchcall.space.SpaceManager method */ @@ -120,11 +116,9 @@ public OutputData execute(InputData indata) { data = (IdentityReserveSpaceInputData) indata; } else { GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReserveSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReserveSpaceInputData) indata); return outputData; } log.debug(":reserveSpace start."); @@ -139,9 +133,8 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -153,9 +146,8 @@ public OutputData execute(InputData indata) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), 
data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -168,43 +160,37 @@ public OutputData execute(InputData indata) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } SpaceSize spaceSize = null; try { - spaceSize = computeSpaceSize(data.getDesiredSize(), - data.getGuaranteedSize(), vfs); + spaceSize = computeSpaceSize(data.getDesiredSize(), data.getGuaranteedSize(), vfs); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } StoRI spaceStori = null; try { - spaceStori = getSpaceStoRI(vfs, relativeSpaceFN, - spaceSize.getDesiderataSpaceSize()); + spaceStori = getSpaceStoRI(vfs, relativeSpaceFN, spaceSize.getDesiderataSpaceSize()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } - log - .debug("Reserve Space File Size: {}", spaceSize.getDesiderataSpaceSize()); + log.debug("Reserve Space 
File Size: {}", spaceSize.getDesiderataSpaceSize()); try { spaceStori.getSpace().fakeAllot(); @@ -213,9 +199,8 @@ public OutputData execute(InputData indata) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create Space File into filesystem. \n"; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -226,9 +211,8 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -236,15 +220,14 @@ public OutputData execute(InputData indata) { TSpaceToken spaceToken = null; try { - spaceToken = registerIntoDB(data.getUser(), data.getSpaceTokenAlias(), - spaceSize.getTotalSize(), spaceSize.getDesiderataSpaceSize(), - data.getSpaceLifetime(), spaceStori.getPFN()); + spaceToken = + registerIntoDB(data.getUser(), data.getSpaceTokenAlias(), spaceSize.getTotalSize(), + spaceSize.getDesiderataSpaceSize(), data.getSpaceLifetime(), spaceStori.getPFN()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + 
data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -254,16 +237,14 @@ public OutputData execute(InputData indata) { try { output = buildOutput(spaceSize, spaceToken, data.getSpaceLifetime()); - logRequestSuccess(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), output.getStatus()); + logRequestSuccess(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), output.getStatus()); } catch (Exception e) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to build a valid output object "; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); } @@ -280,7 +261,7 @@ private void revertAllocation(Space space) { } private StoRI getSpaceStoRI(VirtualFS vfs, String relativeSpaceFN, - TSizeInBytes desiderataSpaceSize) throws Exception { + TSizeInBytes desiderataSpaceSize) throws Exception { StoRI spaceFile = null; try { @@ -316,31 +297,28 @@ private boolean checkParameters(IdentityReserveSpaceInputData data) { log.debug("Null retentionPolicyInfo."); statusCode = TStatusCode.SRM_INVALID_REQUEST; explanation = "RetentionPolicy not specified."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), 
data.getRetentionPolicyInfo(), statusCode, explanation); return false; } TAccessLatency latency = data.getRetentionPolicyInfo().getAccessLatency(); - TRetentionPolicy retentionPolicy = data.getRetentionPolicyInfo() - .getRetentionPolicy(); + TRetentionPolicy retentionPolicy = data.getRetentionPolicyInfo().getRetentionPolicy(); - if (!((latency == null || latency.equals(TAccessLatency.EMPTY) || latency - .equals(TAccessLatency.ONLINE)) && (retentionPolicy == null - || retentionPolicy.equals(TRetentionPolicy.EMPTY) || retentionPolicy - .equals(TRetentionPolicy.REPLICA)))) { + if (!((latency == null || latency.equals(TAccessLatency.EMPTY) + || latency.equals(TAccessLatency.ONLINE)) + && (retentionPolicy == null || retentionPolicy.equals(TRetentionPolicy.EMPTY) + || retentionPolicy.equals(TRetentionPolicy.REPLICA)))) { - log.debug("Invalid retentionPolicyInfo: {}, {}", data - .getRetentionPolicyInfo().getAccessLatency(), data - .getRetentionPolicyInfo().getRetentionPolicy()); + log.debug("Invalid retentionPolicyInfo: {}, {}", + data.getRetentionPolicyInfo().getAccessLatency(), + data.getRetentionPolicyInfo().getRetentionPolicy()); statusCode = TStatusCode.SRM_NOT_SUPPORTED; explanation = "RetentionPolicy requested cannot be satisfied."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return false; } @@ -378,19 +356,15 @@ private VirtualFS getSpaceVFS(String spaceFN) throws Exception { return vfs; } - private void setDefaults(IdentityReserveSpaceInputData data, - VirtualFS vfs) { + private void setDefaults(IdentityReserveSpaceInputData data, VirtualFS vfs) { if (data.getRetentionPolicyInfo().getAccessLatency() == null - || data.getRetentionPolicyInfo().getAccessLatency() - 
.equals(TAccessLatency.EMPTY)) { + || data.getRetentionPolicyInfo().getAccessLatency().equals(TAccessLatency.EMPTY)) { data.getRetentionPolicyInfo().setAccessLatency(TAccessLatency.ONLINE); } if (data.getRetentionPolicyInfo().getRetentionPolicy() == null - || data.getRetentionPolicyInfo().getRetentionPolicy() - .equals(TRetentionPolicy.EMPTY)) { - data.getRetentionPolicyInfo() - .setRetentionPolicy(TRetentionPolicy.REPLICA); + || data.getRetentionPolicyInfo().getRetentionPolicy().equals(TRetentionPolicy.EMPTY)) { + data.getRetentionPolicyInfo().setRetentionPolicy(TRetentionPolicy.REPLICA); } if (data.getSpaceLifetime().isEmpty()) { log.debug("LifeTime is EMPTY. Using default value."); @@ -398,13 +372,12 @@ private void setDefaults(IdentityReserveSpaceInputData data, } } - private SpaceSize computeSpaceSize(TSizeInBytes totalSize, - TSizeInBytes guarSize, VirtualFS vfs) throws Exception { + private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes guarSize, VirtualFS vfs) + throws Exception { TSizeInBytes desiderataSpaceSize = TSizeInBytes.makeEmpty(); - if ((!(totalSize.isEmpty())) - && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { + if ((!(totalSize.isEmpty())) && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { if (totalSize.value() < guarSize.value()) { log.debug("Error: totalSize < guaranteedSize"); statusCode = TStatusCode.SRM_INVALID_REQUEST; @@ -439,8 +412,8 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, } /* - * At this point either totalSize and guarSize contains significative value. - * desiderataSpaceSize is setted to totalSize. + * At this point either totalSize and guarSize contains relevant value. desiderataSpaceSize is + * set to totalSize. */ desiderataSpaceSize = totalSize; // This is valid because StoRM only reserve GUARANTEED space. 
@@ -448,23 +421,20 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes freeSpace = null; try { - freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace(), - SizeUnit.BYTES); + freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace()); } catch (InvalidTSizeAttributesException e) { - log - .debug("Error while retrieving free Space in underlying Filesystem", e); + log.debug("Error while retrieving free Space in underlying Filesystem", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem \n" - + e; + explanation = "Error while retrieving free Space in underlying Filesystem \n" + e; throw new Exception(explanation); } catch (NamespaceException ex) { - log - .debug( + log.debug( "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver", ex); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver \n" - + ex; + explanation = + "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver \n" + + ex; throw new Exception(explanation); } @@ -472,12 +442,11 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, * @todo Change here, also granted SpaceSize must be considered. 
*/ boolean lower_space = false; - // If there is not enogh free space on storage + // If there is not enough free space on storage if (freeSpace.value() < desiderataSpaceSize.value()) { if (freeSpace.value() < guarSize.value()) { - // Not enough freespace - log - .debug(":reserveSpace Not Enough Free Space on storage!"); + // Not enough free space + log.debug(":reserveSpace Not Enough Free Space on storage!"); statusCode = TStatusCode.SRM_NO_FREE_SPACE; explanation = "SRM has not more free space."; throw new Exception(explanation); @@ -490,21 +459,18 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, return this.new SpaceSize(desiderataSpaceSize, totalSize, lower_space); } - private String getRelativeSpaceFilePath(VirtualFS vfs, String spaceFN) - throws Exception { + private String getRelativeSpaceFilePath(VirtualFS vfs, String spaceFN) throws Exception { String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); return relativeSpaceFN; } - private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) - throws Exception { + private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throws Exception { FilesystemPermission fp = FilesystemPermission.ReadWrite; @@ -520,8 +486,7 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throw new Exception(explanation); } if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} , localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} , localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; throw new Exception(explanation); @@ -547,29 +512,27 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) } } - private TSpaceToken registerIntoDB(GridUserInterface user, - String spaceTokenAlias, TSizeInBytes totalSize, - TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, PFN pfn) - throws Exception { + private TSpaceToken registerIntoDB(GridUserInterface user, String spaceTokenAlias, + TSizeInBytes totalSize, TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, + PFN pfn) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, - spaceTokenAlias, totalSize, desiderataSpaceSize, lifeTime, null, - new Date(), pfn); + spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, spaceTokenAlias, totalSize, + desiderataSpaceSize, lifeTime, null, new Date(), pfn); } catch (InvalidSpaceDataAttributesException e) { log.debug("Unable to create Storage Space Data", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create storage space data."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } - spaceData.setUsedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - spaceData.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); + spaceData.setUsedSpaceSize(TSizeInBytes.make(0)); + spaceData.setUnavailableSpaceSize(TSizeInBytes.make(0)); spaceData.setReservedSpaceSize(desiderataSpaceSize); log.debug("Created space data: {}", spaceData); @@ -579,8 +542,8 @@ private TSpaceToken registerIntoDB(GridUserInterface user, log.debug("Unable to register Storage Space Data into DB", e); statusCode = 
TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to register Storage Space Data into DB."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } @@ -592,31 +555,30 @@ private TSpaceToken registerIntoDB(GridUserInterface user, statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create space token."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } return spaceToken; } - private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, - TSpaceToken spaceToken, TLifeTimeInSeconds lifeTime) throws Exception { + private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, TSpaceToken spaceToken, + TLifeTimeInSeconds lifeTime) throws Exception { TReturnStatus status = null; - if (!spaceSize.isLowerSpace()) { - status = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "Space Reservation done"); + if (!spaceSize.isLowerSpace()) { + status = new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Reservation done"); - } else { - status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, - "Space Reservation done, lower space granted."); - } + } else { + status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, + "Space Reservation done, lower space granted."); + } ReserveSpaceOutputData outputData = null; try { outputData = new ReserveSpaceOutputData(spaceSize.getTotalSize(), - spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); + spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); } catch (InvalidReserveSpaceOutputDataAttributesException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -632,8 +594,7 @@ private 
class SpaceSize { private final TSizeInBytes totalSize; private final boolean lowerSpace; - public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, - boolean lowerSpace) { + public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, boolean lowerSpace) { this.desiderataSpaceSize = desiderataSpaceSize; this.totalSize = totalSize; @@ -659,9 +620,7 @@ protected boolean isLowerSpace() { /** * Method that reset an already done reservation to the original status. * - * @param token - * TSpaceToken that contains information about data procived in SRM - * request. + * @param token TSpaceToken that contains information about data procived in SRM request. * @return TReturnStatus that contains of all SRM return parameters. */ public TReturnStatus resetReservation(TSpaceToken token) { @@ -705,8 +664,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { } String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); @@ -719,8 +677,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { StoRI spaceFile = null; try { - spaceFile = vfs.createSpace(relativeSpaceFN, - desiderataSpaceSize.value()); + spaceFile = vfs.createSpace(relativeSpaceFN, desiderataSpaceSize.value()); } catch (NamespaceException e) { log.debug(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -746,8 +703,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -773,8 +729,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -805,16 +760,14 @@ public TReturnStatus resetReservation(TSpaceToken token) { } catch (DataAccessException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); } - public TReturnStatus updateReservation(TSpaceToken token, - TSizeInBytes sizeToAdd, TSURL toSurl) { + public TReturnStatus updateReservation(TSpaceToken token, TSizeInBytes sizeToAdd, TSURL toSurl) { String explanation = null; TStatusCode statusCode = TStatusCode.EMPTY; @@ -856,8 +809,7 @@ public TReturnStatus updateReservation(TSpaceToken token, String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); TSizeInBytes desiderataSpaceSize = sdata.getTotalSpaceSize(); TSizeInBytes availableSize = sdata.getAvailableSpaceSize(); @@ -866,8 +818,7 @@ public TReturnStatus updateReservation(TSpaceToken token, log.debug("Size of removed file: {}" + 
sizeToAdd.value()); try { - desiderataSpaceSize = TSizeInBytes.make( - availableSize.value() + sizeToAdd.value(), SizeUnit.BYTES); + desiderataSpaceSize = TSizeInBytes.make(availableSize.value() + sizeToAdd.value()); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage()); } @@ -910,8 +861,7 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; @@ -940,16 +890,14 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); } else { try { - manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, - fp); + manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, fp); } catch (IllegalArgumentException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); @@ -968,14 +916,12 @@ public TReturnStatus updateReservation(TSpaceToken token, } try { - availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() - + sizeToAdd.value(), SizeUnit.BYTES); + availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() + sizeToAdd.value()); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error computing new available space size\n" - + e.getMessage(); + explanation = "Error computing new available space size\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } @@ -987,8 +933,7 @@ public TReturnStatus updateReservation(TSpaceToken token, log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); @@ -998,8 +943,7 @@ private void revertOldSpaceFileDeletion(LocalFile localFile) { } - private ReserveSpaceOutputData manageError(TStatusCode statusCode, - String explanation) { + private ReserveSpaceOutputData manageError(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ -1011,8 +955,7 @@ private 
ReserveSpaceOutputData manageError(TStatusCode statusCode, return new ReserveSpaceOutputData(status); } - private TReturnStatus manageErrorStatus(TStatusCode statusCode, - String explanation) { + private TReturnStatus manageErrorStatus(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ -1023,8 +966,7 @@ private TReturnStatus manageErrorStatus(TStatusCode statusCode, return status; } - private void printRequestOutcome(TReturnStatus status, - ReserveSpaceInputData data) { + private void printRequestOutcome(TReturnStatus status, ReserveSpaceInputData data) { if (data != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, data); diff --git a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java index ecb50b277..b6a3d8c77 100644 --- a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java @@ -4,104 +4,96 @@ */ package it.grid.storm.synchcall.data.datatransfer; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; -/** - * @author Michele Dibenedetto - * - */ -public class AnonymousPrepareToPutInputData extends - AnonymousFileTransferInputData implements PrepareToPutInputData { - - private TOverwriteMode overwriteMode = OverwriteModeConverter.getInstance() - .toSTORM(Configuration.getInstance().getDefaultOverwriteMode()); - private 
TSizeInBytes fileSize = TSizeInBytes.makeEmpty(); - private TLifeTimeInSeconds desiredFileLifetime; - - /** - * @param user - * @param surl - * @param transferProtocols - * @throws IllegalArgumentException - * @throws IllegalStateException - */ - public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols) - throws IllegalArgumentException, IllegalStateException { - - super(surl, transferProtocols); - this.desiredFileLifetime = TLifeTimeInSeconds.make(Configuration - .getInstance().getFileLifetimeDefault(), TimeUnit.SECONDS); - - } - - public AnonymousPrepareToPutInputData(TSURL surl, - TURLPrefix transferProtocols, TLifeTimeInSeconds desiredFileLifetime) - throws IllegalArgumentException, IllegalStateException { - - this(surl, transferProtocols); - this.desiredFileLifetime = desiredFileLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# - * getOverwriteMode() - */ - @Override - public TOverwriteMode getOverwriteMode() { - - return overwriteMode; - } - - @Override - public void setOverwriteMode(TOverwriteMode overwriteMode) { - - this.overwriteMode = overwriteMode; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData#getFileSize - * () - */ - @Override - public TSizeInBytes getFileSize() { - - return fileSize; - } - - @Override - public void setFileSize(TSizeInBytes fileSize) { - - this.fileSize = fileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# - * getDesiredFileLifetime() - */ - @Override - public TLifeTimeInSeconds getDesiredFileLifetime() { - - return desiredFileLifetime; - } - - @Override - public void setDesiredFileLifetime(TLifeTimeInSeconds desiredFileLifetime) { - - this.desiredFileLifetime = desiredFileLifetime; - } +public class AnonymousPrepareToPutInputData extends AnonymousFileTransferInputData + implements PrepareToPutInputData { + + private 
TOverwriteMode overwriteMode = + OverwriteModeConverter.toSTORM(StormConfiguration.getInstance().getDefaultOverwriteMode()); + private TSizeInBytes fileSize = TSizeInBytes.makeEmpty(); + private TLifeTimeInSeconds desiredFileLifetime; + + /** + * @param user + * @param surl + * @param transferProtocols + * @throws IllegalArgumentException + * @throws IllegalStateException + */ + public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols) + throws IllegalArgumentException, IllegalStateException { + + super(surl, transferProtocols); + this.desiredFileLifetime = TLifeTimeInSeconds + .make(StormConfiguration.getInstance().getFileLifetimeDefault(), TimeUnit.SECONDS); + + } + + public AnonymousPrepareToPutInputData(TSURL surl, TURLPrefix transferProtocols, + TLifeTimeInSeconds desiredFileLifetime) + throws IllegalArgumentException, IllegalStateException { + + this(surl, transferProtocols); + this.desiredFileLifetime = desiredFileLifetime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# getOverwriteMode() + */ + @Override + public TOverwriteMode getOverwriteMode() { + + return overwriteMode; + } + + @Override + public void setOverwriteMode(TOverwriteMode overwriteMode) { + + this.overwriteMode = overwriteMode; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData#getFileSize () + */ + @Override + public TSizeInBytes getFileSize() { + + return fileSize; + } + + @Override + public void setFileSize(TSizeInBytes fileSize) { + + this.fileSize = fileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.synchcall.data.datatransfer.PrepareToPutInputData# getDesiredFileLifetime() + */ + @Override + public TLifeTimeInSeconds getDesiredFileLifetime() { + + return desiredFileLifetime; + } + + @Override + public void setDesiredFileLifetime(TLifeTimeInSeconds desiredFileLifetime) { + + this.desiredFileLifetime = desiredFileLifetime; + } } diff --git 
a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java index 3474ae2d0..62e7ba113 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java +++ b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java @@ -7,15 +7,16 @@ import static it.grid.storm.tape.recalltable.model.TapeRecallStatus.SUCCESS; import static javax.ws.rs.core.MediaType.TEXT_PLAIN_TYPE; +import it.grid.storm.catalogs.TapeRecallCatalog; import it.grid.storm.filesystem.FSException; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.namespace.StoRI; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.TapeRecallException; import java.util.Date; +import java.util.Optional; import java.util.UUID; import javax.ws.rs.core.Response; @@ -23,82 +24,88 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * @author Michele Dibenedetto - * - */ public class PutTapeRecallStatusLogic { - private static final Logger log = LoggerFactory - .getLogger(PutTapeRecallStatusLogic.class); - - /** - * @param requestToken - * @param stori - * @return - * @throws TapeRecallException - */ - public static Response serveRequest(String requestToken, StoRI stori) - throws TapeRecallException { - - LocalFile localFile = stori.getLocalFile(); - boolean fileOnDisk; - - try { - fileOnDisk = localFile.isOnDisk(); - } catch (FSException e) { - log.error("Unable to test file {} presence on disk. 
FSException {}" , localFile.getAbsolutePath() , e.getMessage() , e); - throw new TapeRecallException("Error checking file existence"); - } - - if (!fileOnDisk) { - return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); - } - - if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - // tape not enable for StoRI filesystem, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - String pfn = localFile.getAbsolutePath(); - UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); - TapeRecallCatalog rtCat = new TapeRecallCatalog(); - boolean exists = false; - try { - exists = rtCat.existsTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Error checking existence of a recall task for taskId={} requestToken={}. DataAccessException: {}" , taskId , requestToken , e.getMessage() , e); - throw new TapeRecallException("Error reading from tape recall table"); - } - if (!exists) { - // no recall tasks for this file, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - TapeRecallTO task; - try { - task = rtCat.getTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", requestToken , e.getMessage(),e); - throw new TapeRecallException("Error reading from tape recall table"); - } - - if (TapeRecallStatus.getRecallTaskStatus(task.getStatusId()).equals(SUCCESS)) { - // status already updated, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - UUID groupTaskId = task.getGroupTaskId(); - boolean updated; - try { - updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); - } catch (DataAccessException e) { - log.error("Unable to update task recall status for token {} with groupTaskId={}. 
DataAccessException : {}", requestToken , groupTaskId , e.getMessage() , e); - throw new TapeRecallException("Error updating tape recall table"); - } - if (updated) { - log.info("Task status set to SUCCESS. groupTaskId={} requestToken={} pfn={}" , groupTaskId , requestToken , pfn); - } - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } + private static final Logger log = LoggerFactory.getLogger(PutTapeRecallStatusLogic.class); + + /** + * @param requestToken + * @param stori + * @return + * @throws TapeRecallException + */ + public static Response serveRequest(String requestToken, StoRI stori) throws TapeRecallException { + + LocalFile localFile = stori.getLocalFile(); + boolean fileOnDisk; + + try { + fileOnDisk = localFile.isOnDisk(); + } catch (FSException e) { + log.error("Unable to test file {} presence on disk. FSException {}", + localFile.getAbsolutePath(), e.getMessage(), e); + throw new TapeRecallException("Error checking file existence"); + } + + if (!fileOnDisk) { + return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); + } + + if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + // tape not enable for StoRI filesystem, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + String pfn = localFile.getAbsolutePath(); + UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); + TapeRecallCatalog rtCat = TapeRecallCatalog.getInstance(); + boolean exists = false; + try { + exists = rtCat.existsTask(taskId, requestToken); + } catch (DataAccessException e) { + log.error( + "Error checking existence of a recall task for taskId={} requestToken={}. 
DataAccessException: {}", + taskId, requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + if (!exists) { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + Optional task; + try { + task = rtCat.getTask(taskId, requestToken); + if (task.isEmpty()) { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", + requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + + if (TapeRecallStatus.getRecallTaskStatus(task.get().getStatusId()).equals(SUCCESS)) { + // status already updated, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + UUID groupTaskId = task.get().getGroupTaskId(); + boolean updated; + try { + updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status for token {} with groupTaskId={}. DataAccessException : {}", + requestToken, groupTaskId, e.getMessage(), e); + throw new TapeRecallException("Error updating tape recall table"); + } + if (updated) { + log.info("Task status set to SUCCESS. 
groupTaskId={} requestToken={} pfn={}", groupTaskId, + requestToken, pfn); + } + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } } diff --git a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java index 807d5ac99..7f13d0b3a 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java +++ b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusValidator.java @@ -4,13 +4,6 @@ */ package it.grid.storm.tape.recalltable.model; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.util.SURLValidator; -import it.grid.storm.util.TokenValidator; - import java.util.StringTokenizer; import javax.ws.rs.core.Response; @@ -18,122 +11,128 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.util.SURLValidator; +import it.grid.storm.util.TokenValidator; + public class PutTapeRecallStatusValidator implements RequestValidator { - private static final Logger log = LoggerFactory - .getLogger(PutTapeRecallStatusValidator.class); + private static final Logger log = LoggerFactory.getLogger(PutTapeRecallStatusValidator.class); + + private String requestToken = null; + private StoRI stori = null; + private String inputString = null; + private Response validationResponse = null; + + public PutTapeRecallStatusValidator(String inputString) { - private String requestToken = null; - private StoRI stori = null; - private String inputString = null; - private Response validationResponse = null; + this.inputString = inputString; + } - public 
PutTapeRecallStatusValidator(String inputString) { + /** + * Parse and validate input. + *

+ * If this method returns true the input data can be retrieved with the methods: + * {@link #getRequestToken()} and {@link #getStoRI()}. + *

+ * If this method returns false the response can be retrieved with the method + * {@link #getResponse()}. + * + * @return true for successful validation process, false otherwise. + */ + public boolean validate() { - this.inputString = inputString; - } + StringTokenizer tokenizer = new StringTokenizer(inputString, "\n"); - /** - * Parse and validate input. - *

- * If this method returns true the input data can be retrieved - * with the methods: {@link #getRequestToken()} and {@link #getStoRI()}. - *

- * If this method returns false the response can be retrieved - * with the method {@link #getResponse()}. - * - * @return true for successful validation process, - * false otherwise. - */ - public boolean validate() { + if (tokenizer.countTokens() != 2) { - StringTokenizer tokenizer = new StringTokenizer(inputString, "\n"); + log.trace("putTaskStatus() - input error"); - if (tokenizer.countTokens() != 2) { + validationResponse = Response.status(400).build(); + return false; - log.trace("putTaskStatus() - input error"); + } - validationResponse = Response.status(400).build(); - return false; + String requestTokenInput = tokenizer.nextToken(); + String surlInput = tokenizer.nextToken(); - } + if ((!requestTokenInput.startsWith("requestToken=")) || (!surlInput.startsWith("surl="))) { - String requestTokenInput = tokenizer.nextToken(); - String surlInput = tokenizer.nextToken(); + log.trace("putTaskStatus() - input error"); - if ((!requestTokenInput.startsWith("requestToken=")) - || (!surlInput.startsWith("surl="))) { + validationResponse = Response.status(400).build(); + return false; - log.trace("putTaskStatus() - input error"); + } - validationResponse = Response.status(400).build(); - return false; + requestToken = requestTokenInput.substring(requestTokenInput.indexOf('=') + 1); + String surlString = surlInput.substring(surlInput.indexOf('=') + 1); - } + if ((requestToken.length() == 0) || (surlString.length() == 0)) { - requestToken = requestTokenInput - .substring(requestTokenInput.indexOf('=') + 1); - String surlString = surlInput.substring(surlInput.indexOf('=') + 1); + log.trace("putTaskStatus() - input error"); - if ((requestToken.length() == 0) || (surlString.length() == 0)) { + validationResponse = Response.status(400).build(); + return false; - log.trace("putTaskStatus() - input error"); + } - validationResponse = Response.status(400).build(); - return false; + if (!TokenValidator.valid(requestToken)) { + validationResponse = + 
Response.status(400).entity("Invalid token: " + requestToken + " \n\n").build(); + return false; + } - } + if (!validateSurl(surlString)) { + return false; + } - if(!TokenValidator.valid(requestToken)){ - validationResponse = Response.status(400).entity("Invalid token: " + requestToken +" \n\n").build(); - return false; - } - - if (!validateSurl(surlString)) { - return false; - } + return true; + } - return true; - } + public String getRequestToken() { - public String getRequestToken() { + return requestToken; + } - return requestToken; - } + public StoRI getStoRI() { - public StoRI getStoRI() { + return stori; + } - return stori; - } + public Response getResponse() { - public Response getResponse() { + return validationResponse; + } - return validationResponse; - } + private boolean validateSurl(String surlString) { - private boolean validateSurl(String surlString) { + TSURL surl; - TSURL surl; + if (!SURLValidator.valid(surlString)) { + validationResponse = + Response.status(400).entity("Invalid surl: " + surlString + "\n\n").build(); + return false; + } - if(!SURLValidator.valid(surlString)){ - validationResponse = Response.status(400).entity("Invalid surl: " + surlString + "\n\n").build(); - return false; - } - - try { + try { - surl = TSURL.makeFromStringValidate(surlString); + surl = TSURL.makeFromStringValidate(surlString); - } catch (InvalidTSURLAttributesException e) { - validationResponse = Response.status(400).build(); - return false; - } - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Exception e) { - log.warn("Unable to build a stori for surl {} UnapprochableSurlException: {}" , surl , e.getMessage(),e); - return false; - } - return true; - } + } catch (InvalidTSURLAttributesException e) { + validationResponse = Response.status(400).build(); + return false; + } + try { + stori = Namespace.getInstance().resolveStoRIbySURL(surl); + } catch (Exception e) { + log.warn("Unable to build a stori for surl {} 
UnapprochableSurlException: {}", surl, + e.getMessage(), e); + return false; + } + return true; + } } diff --git a/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java b/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java index 33e24d9f2..8bfcf2068 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java +++ b/src/main/java/it/grid/storm/tape/recalltable/persistence/PropertiesDB.java @@ -7,7 +7,7 @@ */ package it.grid.storm.tape.recalltable.persistence; -import it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.persistence.exceptions.DataAccessException; import it.grid.storm.persistence.model.TapeRecallTO; import it.grid.storm.srm.types.TRequestToken; @@ -35,7 +35,7 @@ public class PropertiesDB { private static final Logger log = LoggerFactory.getLogger(PropertiesDB.class); - private static Configuration config = Configuration.getInstance(); + private static StormConfiguration config = StormConfiguration.getInstance(); private final String dataFileName = "recall-table.txt"; private final String propertiesDBName; diff --git a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java index 60c8eff06..143bb3d87 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java +++ b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java @@ -13,27 +13,6 @@ import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static javax.ws.rs.core.Response.Status.NOT_FOUND; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import 
it.grid.storm.namespace.StoRI; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.rest.metadata.service.ResourceNotFoundException; -import it.grid.storm.rest.metadata.service.ResourceService; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; -import it.grid.storm.tape.recalltable.TapeRecallException; -import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusLogic; -import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusValidator; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import it.grid.storm.tape.recalltable.model.TaskInsertRequestValidator; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -55,16 +34,33 @@ import javax.ws.rs.core.GenericEntity; import javax.ws.rs.core.Response; -/** - * @author Riccardo Zappi - * @author Enrico Vianello - */ +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +import it.grid.storm.catalogs.TapeRecallCatalog; +import it.grid.storm.config.StormConfiguration; +import it.grid.storm.namespace.Namespace; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.rest.metadata.service.ResourceNotFoundException; +import it.grid.storm.rest.metadata.service.ResourceService; +import it.grid.storm.tape.recalltable.TapeRecallException; +import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusLogic; +import it.grid.storm.tape.recalltable.model.PutTapeRecallStatusValidator; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; +import it.grid.storm.tape.recalltable.model.TaskInsertRequestValidator; + 
@Path("/recalltable/task") public class TaskResource { private static final Logger log = LoggerFactory.getLogger(TaskResource.class); - private static Configuration config = Configuration.getInstance(); + private static StormConfiguration config = StormConfiguration.getInstance(); private ResourceService service; private TapeRecallCatalog recallCatalog; @@ -73,8 +69,8 @@ public class TaskResource { public TaskResource() throws NamespaceException { - NamespaceInterface ns = NamespaceDirector.getNamespace(); - recallCatalog = new TapeRecallCatalog(); + Namespace ns = Namespace.getInstance(); + recallCatalog = TapeRecallCatalog.getInstance(); service = new ResourceService(ns.getAllDefinedVFS(), ns.getAllDefinedMappingRules()); } diff --git a/src/main/java/it/grid/storm/tape/recalltable/resources/TasksCardinality.java b/src/main/java/it/grid/storm/tape/recalltable/resources/TasksCardinality.java index 54a50359c..204e04162 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/resources/TasksCardinality.java +++ b/src/main/java/it/grid/storm/tape/recalltable/resources/TasksCardinality.java @@ -7,8 +7,8 @@ */ package it.grid.storm.tape.recalltable.resources; +import it.grid.storm.catalogs.TapeRecallCatalog; import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.TapeRecallException; import javax.ws.rs.GET; @@ -19,74 +19,70 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * @author ritz - * - */ @Path("/recalltable/cardinality/tasks/") public class TasksCardinality { - private static final Logger log = LoggerFactory.getLogger(TasksCardinality.class); + private static final Logger log = LoggerFactory.getLogger(TasksCardinality.class); - /** - * Get the number of tasks that are queued. 
- * - * @return - * @throws TapeRecallException - */ - @GET - @Path("/queued") - @Produces("text/plain") - public Response getNumberQueued() { + /** + * Get the number of tasks that are queued. + * + * @return + * @throws TapeRecallException + */ + @GET + @Path("/queued") + @Produces("text/plain") + public Response getNumberQueued() { - int nQueued = 0; + TapeRecallCatalog rtCat = TapeRecallCatalog.getInstance(); + int nQueued = 0; - try { + try { - TapeRecallCatalog rtCat = new TapeRecallCatalog(); - nQueued = rtCat.getNumberTaskQueued(); + nQueued = rtCat.getNumberTaskQueued(); - } catch (DataAccessException e) { + } catch (DataAccessException e) { - String errorStr = "Unable to use RecallTable DB."; - log.error(errorStr, e); - return Response.serverError().entity(errorStr).build(); - } + String errorStr = "Unable to use RecallTable DB."; + log.error(errorStr, e); + return Response.serverError().entity(errorStr).build(); + } - if (nQueued > 0) { - log.info("Number of tasks queued = {}", nQueued); - } else { - log.trace("Number of tasks queued = {}", nQueued); - } - return Response.ok().entity(Integer.toString(nQueued)).build(); - } + if (nQueued > 0) { + log.info("Number of tasks queued = {}", nQueued); + } else { + log.trace("Number of tasks queued = {}", nQueued); + } + return Response.ok().entity(Integer.toString(nQueued)).build(); + } - /** - * Get the number of tasks that are ready for take over. - * - * @return - */ - @GET - @Path("/readyTakeOver") - @Produces("text/plain") - public Response getReadyForTakeover() { + /** + * Get the number of tasks that are ready for take over. 
+ * + * @return + */ + @GET + @Path("/readyTakeOver") + @Produces("text/plain") + public Response getReadyForTakeover() { - int nReadyForTakeover = 0; + TapeRecallCatalog rtCat = TapeRecallCatalog.getInstance(); + int nReadyForTakeover = 0; - try { + try { - TapeRecallCatalog rtCat = new TapeRecallCatalog(); - nReadyForTakeover = rtCat.getReadyForTakeOver(); + nReadyForTakeover = rtCat.getReadyForTakeOver(); - } catch (DataAccessException e) { + } catch (DataAccessException e) { - String errorStr = "Unable to use RecallTable DB."; - log.error(errorStr, e); - return Response.serverError().entity(errorStr).build(); - } + String errorStr = "Unable to use RecallTable DB."; + log.error(errorStr, e); + return Response.serverError().entity(errorStr).build(); + } - log.debug("Number of tasks queued = {}", nReadyForTakeover); - return Response.ok().entity(Integer.toString(nReadyForTakeover)).build(); - } + log.debug("Number of tasks queued = {}", nReadyForTakeover); + return Response.ok().entity(Integer.toString(nReadyForTakeover)).build(); + } -} \ No newline at end of file +} diff --git a/src/main/java/it/grid/storm/tape/recalltable/resources/TasksResource.java b/src/main/java/it/grid/storm/tape/recalltable/resources/TasksResource.java index b235ab072..1e09cc1be 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/resources/TasksResource.java +++ b/src/main/java/it/grid/storm/tape/recalltable/resources/TasksResource.java @@ -10,9 +10,9 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import it.grid.storm.config.Configuration; +import it.grid.storm.catalogs.TapeRecallCatalog; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.TapeRecallException; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; @@ -42,246 +42,246 @@ @Path("/recalltable/tasks") public class TasksResource { - 
private static final Logger log = LoggerFactory.getLogger(TasksResource.class); - - private static Configuration config = Configuration.getInstance(); - - /** - * Return recall tasks for being taken over. The status of the tasks that - * are returned is set to in progress. - * - * @param input a key value pair in which the value is the number - * of results to be returned in the - * @return the tasks ready to takeover - * @throws TapeRecallException - */ - @PUT - @Consumes("text/plain") - @Produces("text/plain") - public Response putTakeoverTasks(InputStream input) throws TapeRecallException { - - // retrieve the Input String - String inputStr = buildInputString(input); - - log.debug("@PUT (input string) = '{}'" , inputStr); - - // retrieve the number of tasks to takeover (default = 1) - int numbOfTask = 1; - - // retrieve value from Body param - String keyTakeover = config.getTaskoverKey(); - - int eqIndex = inputStr.indexOf('='); - - if (eqIndex > 0) { - - String value = inputStr.substring(eqIndex); - String key = inputStr.substring(0, eqIndex); - - if (key.equals(keyTakeover)) { - - try { - - // trim out the '\n' end. - numbOfTask = Integer.valueOf(value.substring(1, value.length() - 1)); - - } catch (NumberFormatException e) { - - throw new TapeRecallException("Unable to understand " + - "the number value = '" + value + "'"); - } - } - } - - // retrieve the tasks - List tasks = new TapeRecallCatalog().takeoverNTasksWithDoubles(numbOfTask); - - HashMap> groupTaskMap = buildGroupTaskMap(tasks); - - List groupTasks = Lists.newArrayList(); - - for (List groupTaskList : groupTaskMap.values()) { - - try { - - groupTasks.add(makeOne(groupTaskList)); - - } catch (IllegalArgumentException e) { - - log.error("Unable to makeOne the task list . 
IllegalArgumentException : {}" , e.getMessage() , e); - log.error("Erroneous task list (long output): {}" , groupTaskList.toString()); - log.error("Skip the erroneous task list and go on...Please contact StoRM support"); - } - } - - if (tasks.size() > groupTasks.size()) { - - log.debug("Taking over some multy-group tasks"); - } - - log.debug("Number of tasks recalled : <{}> over <{}> tasks requested" , groupTasks.size() , tasks.size()); - - // need a generic entity - GenericEntity> entity = - new GenericEntity>(tasks) {}; - - return Response.ok(entity).build(); - } - - /** - * Creates a map with the taskIds as keys and the list of tasks related to - * each taskId (key) as value - * - * @param tasks - * @return - */ - private HashMap> buildGroupTaskMap(List tasks) { - - HashMap> groupTaskMap = Maps.newHashMap(); - - for (TapeRecallTO task : tasks) { - - List taskList = - groupTaskMap.get(task.getGroupTaskId()); - - if (taskList == null) { - - taskList = Lists.newArrayList(); - groupTaskMap.put(task.getGroupTaskId(), taskList); - } - - taskList.add(task); - } - - return groupTaskMap; - } - - /** - * Given a list of tasks with the same taskId produces a single task merging - * the list members - * - * @param recallTasks - * @return - */ - private TapeRecallTO makeOne(List recallTasks) { - - TapeRecallTO taskTO = new TapeRecallTO(); - - UUID taskId = recallTasks.get(0).getTaskId(); - - // verify that all have the same task id - for (TapeRecallTO recallTask : recallTasks) { - - if (!recallTask.getTaskId().equals(taskId)) { - - log.error("Received a list of not omogeneous tasks, the taskid '{}' is not matched by : {}" , taskId , recallTask); - - throw new IllegalArgumentException( - "Received a list of not omogeneous tasks"); - } - } - - for (TapeRecallTO recallTask : recallTasks) { - - // set common fields from any of the tasks - taskTO.setTaskId(recallTask.getTaskId()); - taskTO.setGroupTaskId(recallTask.getGroupTaskId()); - 
taskTO.setRequestToken(recallTask.getRequestToken()); - taskTO.setRequestType(recallTask.getRequestType()); - taskTO.setFileName(recallTask.getFileName()); - taskTO.setUserID(recallTask.getUserID()); - taskTO.setVoName(recallTask.getVoName()); - taskTO.setStatus(TapeRecallStatus.QUEUED); - - break; - } - - /* - * merge task on recall related fields to have a pin that starts as soon as - * requested and last as long as needed - */ - - int maxRetryAttempt = 0; - - Date minInsertionInstant = null; - Date minDeferredRecallInstant = null; - Date maxPinExpirationInstant = null; - - for (TapeRecallTO recallTask : recallTasks) { - - if (recallTask.getRetryAttempt() > maxRetryAttempt) { - maxRetryAttempt = recallTask.getRetryAttempt(); - } - - if (minInsertionInstant == null - || recallTask.getInsertionInstant().before(minInsertionInstant)) { - - minInsertionInstant = recallTask.getInsertionInstant(); - } - - if (minDeferredRecallInstant == null - || recallTask.getDeferredRecallInstant().before(minDeferredRecallInstant)) { - - minDeferredRecallInstant = recallTask.getDeferredRecallInstant(); - } - - Date currentPinExpirationInstant = - new Date(recallTask.getDeferredRecallInstant().getTime() + (recallTask.getPinLifetime() * 1000)); - - if (maxPinExpirationInstant == null - || currentPinExpirationInstant.after(maxPinExpirationInstant)) { - - maxPinExpirationInstant = currentPinExpirationInstant; - } - } - - taskTO.setRetryAttempt(maxRetryAttempt); - taskTO.setInsertionInstant(minInsertionInstant); - taskTO.setDeferredRecallInstant(minDeferredRecallInstant); - - int pinLifeTime = (int) (maxPinExpirationInstant.getTime() - minDeferredRecallInstant.getTime()) / 1000; - - taskTO.setPinLifetime(pinLifeTime); - - return taskTO; - } - - /** - * Utility method. 
- * - */ - private String buildInputString(InputStream input) { - - BufferedReader reader = new BufferedReader(new InputStreamReader(input)); - - StringBuilder sb = new StringBuilder(); - - String line = null; - - try { - - while ((line = reader.readLine()) != null) { - - sb.append(line + "\n"); - } - - } catch (IOException e) { - - log.error(e.getMessage(), e); - - } finally { - - try { - - input.close(); - - } catch (IOException e) { - - log.error(e.getMessage(), e); - } - } - - return sb.toString(); - } - -} \ No newline at end of file + private static final Logger log = LoggerFactory.getLogger(TasksResource.class); + + private static StormConfiguration config = StormConfiguration.getInstance(); + + /** + * Return recall tasks for being taken over. The status of the tasks that are returned is set to + * in progress. + * + * @param input a key value pair in which the value is the number of results to be returned in the + * @return the tasks ready to takeover + * @throws TapeRecallException + */ + @PUT + @Consumes("text/plain") + @Produces("text/plain") + public Response putTakeoverTasks(InputStream input) throws TapeRecallException { + + // retrieve the Input String + String inputStr = buildInputString(input); + + log.debug("@PUT (input string) = '{}'", inputStr); + + // retrieve the number of tasks to takeover (default = 1) + int numbOfTask = 1; + + // retrieve value from Body param + String keyTakeover = config.getTaskoverKey(); + + int eqIndex = inputStr.indexOf('='); + + if (eqIndex > 0) { + + String value = inputStr.substring(eqIndex); + String key = inputStr.substring(0, eqIndex); + + if (key.equals(keyTakeover)) { + + try { + + // trim out the '\n' end. 
+ numbOfTask = Integer.valueOf(value.substring(1, value.length() - 1)); + + } catch (NumberFormatException e) { + + throw new TapeRecallException( + "Unable to understand " + "the number value = '" + value + "'"); + } + } + } + + // retrieve the tasks + List tasks = + TapeRecallCatalog.getInstance().takeoverNTasksWithDoubles(numbOfTask); + + HashMap> groupTaskMap = buildGroupTaskMap(tasks); + + List groupTasks = Lists.newArrayList(); + + for (List groupTaskList : groupTaskMap.values()) { + + try { + + groupTasks.add(makeOne(groupTaskList)); + + } catch (IllegalArgumentException e) { + + log.error("Unable to makeOne the task list . IllegalArgumentException : {}", e.getMessage(), + e); + log.error("Erroneous task list (long output): {}", groupTaskList.toString()); + log.error("Skip the erroneous task list and go on...Please contact StoRM support"); + } + } + + if (tasks.size() > groupTasks.size()) { + + log.debug("Taking over some multy-group tasks"); + } + + log.debug("Number of tasks recalled : <{}> over <{}> tasks requested", groupTasks.size(), + tasks.size()); + + // need a generic entity + GenericEntity> entity = new GenericEntity>(tasks) {}; + + return Response.ok(entity).build(); + } + + /** + * Creates a map with the taskIds as keys and the list of tasks related to each taskId (key) as + * value + * + * @param tasks + * @return + */ + private HashMap> buildGroupTaskMap(List tasks) { + + HashMap> groupTaskMap = Maps.newHashMap(); + + for (TapeRecallTO task : tasks) { + + List taskList = groupTaskMap.get(task.getGroupTaskId()); + + if (taskList == null) { + + taskList = Lists.newArrayList(); + groupTaskMap.put(task.getGroupTaskId(), taskList); + } + + taskList.add(task); + } + + return groupTaskMap; + } + + /** + * Given a list of tasks with the same taskId produces a single task merging the list members + * + * @param recallTasks + * @return + */ + private TapeRecallTO makeOne(List recallTasks) { + + TapeRecallTO taskTO = new TapeRecallTO(); + + UUID taskId = 
recallTasks.get(0).getTaskId(); + + // verify that all have the same task id + for (TapeRecallTO recallTask : recallTasks) { + + if (!recallTask.getTaskId().equals(taskId)) { + + log.error("Received a list of not omogeneous tasks, the taskid '{}' is not matched by : {}", + taskId, recallTask); + + throw new IllegalArgumentException("Received a list of not omogeneous tasks"); + } + } + + for (TapeRecallTO recallTask : recallTasks) { + + // set common fields from any of the tasks + taskTO.setTaskId(recallTask.getTaskId()); + taskTO.setGroupTaskId(recallTask.getGroupTaskId()); + taskTO.setRequestToken(recallTask.getRequestToken()); + taskTO.setRequestType(recallTask.getRequestType()); + taskTO.setFileName(recallTask.getFileName()); + taskTO.setUserID(recallTask.getUserID()); + taskTO.setVoName(recallTask.getVoName()); + taskTO.setStatus(TapeRecallStatus.QUEUED); + + break; + } + + /* + * merge task on recall related fields to have a pin that starts as soon as requested and last + * as long as needed + */ + + int maxRetryAttempt = 0; + + Date minInsertionInstant = null; + Date minDeferredRecallInstant = null; + Date maxPinExpirationInstant = null; + + for (TapeRecallTO recallTask : recallTasks) { + + if (recallTask.getRetryAttempt() > maxRetryAttempt) { + maxRetryAttempt = recallTask.getRetryAttempt(); + } + + if (minInsertionInstant == null + || recallTask.getInsertionInstant().before(minInsertionInstant)) { + + minInsertionInstant = recallTask.getInsertionInstant(); + } + + if (minDeferredRecallInstant == null + || recallTask.getDeferredRecallInstant().before(minDeferredRecallInstant)) { + + minDeferredRecallInstant = recallTask.getDeferredRecallInstant(); + } + + Date currentPinExpirationInstant = new Date( + recallTask.getDeferredRecallInstant().getTime() + (recallTask.getPinLifetime() * 1000)); + + if (maxPinExpirationInstant == null + || currentPinExpirationInstant.after(maxPinExpirationInstant)) { + + maxPinExpirationInstant = currentPinExpirationInstant; + } + 
} + + taskTO.setRetryAttempt(maxRetryAttempt); + taskTO.setInsertionInstant(minInsertionInstant); + taskTO.setDeferredRecallInstant(minDeferredRecallInstant); + + int pinLifeTime = + (int) (maxPinExpirationInstant.getTime() - minDeferredRecallInstant.getTime()) / 1000; + + taskTO.setPinLifetime(pinLifeTime); + + return taskTO; + } + + /** + * Utility method. + * + */ + private String buildInputString(InputStream input) { + + BufferedReader reader = new BufferedReader(new InputStreamReader(input)); + + StringBuilder sb = new StringBuilder(); + + String line = null; + + try { + + while ((line = reader.readLine()) != null) { + + sb.append(line + "\n"); + } + + } catch (IOException e) { + + log.error(e.getMessage(), e); + + } finally { + + try { + + input.close(); + + } catch (IOException e) { + + log.error(e.getMessage(), e); + } + } + + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/util/VirtualFSHelper.java b/src/main/java/it/grid/storm/util/VirtualFSHelper.java index d44a12945..48d387fac 100644 --- a/src/main/java/it/grid/storm/util/VirtualFSHelper.java +++ b/src/main/java/it/grid/storm/util/VirtualFSHelper.java @@ -8,7 +8,7 @@ import com.google.common.collect.Lists; -import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.Namespace; import it.grid.storm.namespace.model.Capability; import it.grid.storm.namespace.model.Quota; import it.grid.storm.namespace.model.VirtualFS; @@ -43,7 +43,7 @@ public static final boolean isGPFSQuotaEnabledForVFS(VirtualFS vfs) { public static List getGPFSQuotaEnabledFilesystems() { List fss = Lists.newArrayList(); - List allVFS = NamespaceDirector.getNamespace().getAllDefinedVFS(); + List allVFS = Namespace.getInstance().getAllDefinedVFS(); for (VirtualFS vfs : allVFS) { if (isGPFSQuotaEnabledForVFS(vfs)) diff --git a/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java b/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java index f0ff3ddb0..e178bb0db 100644 --- 
a/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java +++ b/src/main/java/it/grid/storm/xmlrpc/XMLRPCExecutor.java @@ -4,9 +4,16 @@ */ package it.grid.storm.xmlrpc; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.OperationType; import it.grid.storm.health.BookKeeper; -import it.grid.storm.health.HealthDirector; +import it.grid.storm.health.HealthMonitor; import it.grid.storm.health.LogEvent; import it.grid.storm.synchcall.SynchcallDispatcher; import it.grid.storm.synchcall.SynchcallDispatcherFactory; @@ -17,151 +24,125 @@ import it.grid.storm.xmlrpc.converter.Converter; import it.grid.storm.xmlrpc.converter.ConveterFactory; -import java.util.ArrayList; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is part of the StoRM project. - * - * @author lucamag - * @date May 27, 2008 - */ - public class XMLRPCExecutor { - private static ArrayList bookKeepers = HealthDirector - .getHealthMonitor().getBookKeepers(); - - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(XMLRPCExecutor.class); - - /** - * @param type - * @param inputParam - * @return - */ - - public Map execute(OperationType type, Map inputParam) - throws StoRMXmlRpcException { - - long startTime = System.currentTimeMillis(); - long duration = System.nanoTime(); - log.debug("Executing a '{}' call" , type.toString()); - log.debug(" Structure size : {}" , inputParam.size()); - Converter converter = ConveterFactory.getConverter(type); - SynchcallDispatcher dispatcher = SynchcallDispatcherFactory.getDispatcher(); - - log.debug("Converting input data with Converter {}", converter.getClass().getName()); - InputData inputData = converter.convertToInputData(inputParam); - - log.debug("Dispatching request using SynchcallDispatcher {}" - , 
dispatcher.getClass().getName()); - OutputData outputData; - try { - outputData = dispatcher.processRequest(type, inputData); - } catch (IllegalArgumentException e) { - log - .error("Unable to process the request. Error from the SynchcallDispatcher. IllegalArgumentException: {}" - , e.getMessage(),e); - throw new StoRMXmlRpcException( - "Unable to process the request. IllegalArgumentException: " - + e.getMessage()); - } catch (CommandException e) { - log - .error("Unable to execute the request. Error from the SynchcallDispatcher. CommandException: {}" - , e.getMessage(),e); - throw new StoRMXmlRpcException( - "Unable to process the request. CommandException: " + e.getMessage()); - } - Map outputParam = converter.convertFromOutputData(outputData); - duration = System.nanoTime() - duration; - - logExecution(convertOperationType(type), - DataHelper.getRequestor(inputData), startTime, - TimeUnit.NANOSECONDS.toMillis(duration), - outputData.isSuccess()); - - return outputParam; - } - - /** - * Method used to book the execution of SYNCH operation - */ - private void logExecution(it.grid.storm.health.OperationType opType, - String dn, long startTime, long duration, boolean successResult) { - - LogEvent event = new LogEvent(opType, dn, startTime, duration, - successResult); - if (!(bookKeepers.isEmpty())) { - log.debug("Found # {} bookeepers." , bookKeepers.size()); - for (int i = 0; i < bookKeepers.size(); i++) { - bookKeepers.get(i).addLogEvent(event); - } - } - } - - /** - * TOREMOVE! this is a temporary code since two different class of - * OperationTYpe are defined. This is to convert the two kind of operation - * type, from the onw used here, enum based, to the one requested by the - * hearthbeat. 
- */ - private it.grid.storm.health.OperationType convertOperationType( - OperationType type) { - - switch (type) { - case PTG: - return it.grid.storm.health.OperationType.PTG; - case SPTG: - return it.grid.storm.health.OperationType.SPTG; - case PTP: - return it.grid.storm.health.OperationType.PTP; - case SPTP: - return it.grid.storm.health.OperationType.SPTP; - case COPY: - return it.grid.storm.health.OperationType.COPY; - case BOL: - return it.grid.storm.health.OperationType.BOL; - case AF: - return it.grid.storm.health.OperationType.AF; - case AR: - return it.grid.storm.health.OperationType.AR; - case EFL: - return it.grid.storm.health.OperationType.EFL; - case GSM: - return it.grid.storm.health.OperationType.GSM; - case GST: - return it.grid.storm.health.OperationType.GST; - case LS: - return it.grid.storm.health.OperationType.LS; - case MKD: - return it.grid.storm.health.OperationType.MKD; - case MV: - return it.grid.storm.health.OperationType.MV; - case PNG: - return it.grid.storm.health.OperationType.PNG; - case PD: - return it.grid.storm.health.OperationType.PD; - case RF: - return it.grid.storm.health.OperationType.RF; - case RESSP: - return it.grid.storm.health.OperationType.RS; - case RELSP: - return it.grid.storm.health.OperationType.RSP; - case RM: - return it.grid.storm.health.OperationType.RM; - case RMD: - return it.grid.storm.health.OperationType.RMD; - default: - return it.grid.storm.health.OperationType.UNDEF; - } - } + private static List bookKeepers = + HealthMonitor.getInstance().getBookKeepers(); + + private static final Logger log = LoggerFactory.getLogger(XMLRPCExecutor.class); + + /** + * @param type + * @param inputParam + * @return + */ + + public Map execute(OperationType type, Map inputParam) throws StoRMXmlRpcException { + + long startTime = System.currentTimeMillis(); + long duration = System.nanoTime(); + log.debug("Executing a '{}' call", type.toString()); + log.debug(" Structure size : {}", inputParam.size()); + Converter 
converter = ConveterFactory.getConverter(type); + SynchcallDispatcher dispatcher = SynchcallDispatcherFactory.getDispatcher(); + + log.debug("Converting input data with Converter {}", converter.getClass().getName()); + InputData inputData = converter.convertToInputData(inputParam); + + log.debug("Dispatching request using SynchcallDispatcher {}", dispatcher.getClass().getName()); + OutputData outputData; + try { + outputData = dispatcher.processRequest(type, inputData); + } catch (IllegalArgumentException e) { + log.error( + "Unable to process the request. Error from the SynchcallDispatcher. IllegalArgumentException: {}", + e.getMessage(), e); + throw new StoRMXmlRpcException( + "Unable to process the request. IllegalArgumentException: " + e.getMessage()); + } catch (CommandException e) { + log.error( + "Unable to execute the request. Error from the SynchcallDispatcher. CommandException: {}", + e.getMessage(), e); + throw new StoRMXmlRpcException( + "Unable to process the request. CommandException: " + e.getMessage()); + } + Map outputParam = converter.convertFromOutputData(outputData); + duration = System.nanoTime() - duration; + + logExecution(convertOperationType(type), DataHelper.getRequestor(inputData), startTime, + TimeUnit.NANOSECONDS.toMillis(duration), outputData.isSuccess()); + + return outputParam; + } + + /** + * Method used to book the execution of SYNCH operation + */ + private void logExecution(it.grid.storm.health.OperationType opType, String dn, long startTime, + long duration, boolean successResult) { + + LogEvent event = new LogEvent(opType, dn, startTime, duration, successResult); + if (!(bookKeepers.isEmpty())) { + log.debug("Found # {} bookeepers.", bookKeepers.size()); + for (int i = 0; i < bookKeepers.size(); i++) { + bookKeepers.get(i).addLogEvent(event); + } + } + } + + /** + * TOREMOVE! this is a temporary code since two different class of OperationTYpe are defined. 
This + * is to convert the two kind of operation type, from the onw used here, enum based, to the one + * requested by the hearthbeat. + */ + private it.grid.storm.health.OperationType convertOperationType(OperationType type) { + + switch (type) { + case PTG: + return it.grid.storm.health.OperationType.PTG; + case SPTG: + return it.grid.storm.health.OperationType.SPTG; + case PTP: + return it.grid.storm.health.OperationType.PTP; + case SPTP: + return it.grid.storm.health.OperationType.SPTP; + case COPY: + return it.grid.storm.health.OperationType.COPY; + case BOL: + return it.grid.storm.health.OperationType.BOL; + case AF: + return it.grid.storm.health.OperationType.AF; + case AR: + return it.grid.storm.health.OperationType.AR; + case EFL: + return it.grid.storm.health.OperationType.EFL; + case GSM: + return it.grid.storm.health.OperationType.GSM; + case GST: + return it.grid.storm.health.OperationType.GST; + case LS: + return it.grid.storm.health.OperationType.LS; + case MKD: + return it.grid.storm.health.OperationType.MKD; + case MV: + return it.grid.storm.health.OperationType.MV; + case PNG: + return it.grid.storm.health.OperationType.PNG; + case PD: + return it.grid.storm.health.OperationType.PD; + case RF: + return it.grid.storm.health.OperationType.RF; + case RESSP: + return it.grid.storm.health.OperationType.RS; + case RELSP: + return it.grid.storm.health.OperationType.RSP; + case RM: + return it.grid.storm.health.OperationType.RM; + case RMD: + return it.grid.storm.health.OperationType.RMD; + default: + return it.grid.storm.health.OperationType.UNDEF; + } + } } diff --git a/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java b/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java index e60f4ab5c..586ec28da 100644 --- a/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java +++ b/src/main/java/it/grid/storm/xmlrpc/XMLRPCHttpServer.java @@ -31,7 +31,7 @@ import com.codahale.metrics.jetty8.InstrumentedHandler; -import 
it.grid.storm.config.Configuration; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.metrics.NamedInstrumentedSelectChannelConnector; import it.grid.storm.metrics.NamedInstrumentedThreadPool; import it.grid.storm.rest.JettyThread; @@ -53,9 +53,6 @@ public final class XMLRPCHttpServer { */ private boolean running = false; - public static final int DEFAULT_MAX_THREAD_NUM = 256; - public static final int DEFAULT_MAX_QUEUE_SIZE = 1000; - /** * @param port * @param maxThreadNum @@ -69,28 +66,17 @@ public XMLRPCHttpServer(int port, int maxThreadNum, int maxQueueSize) private void configureThreadPool(Server s, int maxThreadNum, int maxQueueSize) { - int threadNumber = maxThreadNum; - - if (threadNumber <= 0) { - threadNumber = DEFAULT_MAX_THREAD_NUM; - } - - int queueSize = maxQueueSize; - - if (queueSize <= 0) { - queueSize = DEFAULT_MAX_QUEUE_SIZE; - } NamedInstrumentedThreadPool tp = new NamedInstrumentedThreadPool("xmlrpc", METRIC_REGISTRY.getRegistry()); - tp.setMaxThreads(threadNumber); - tp.setMaxQueued(queueSize); + tp.setMaxThreads(maxThreadNum); + tp.setMaxQueued(maxQueueSize); s.setThreadPool(tp); - LOG.info("Configured XMLRPC server threadpool: maxThreads={}, maxQueueSize={}", threadNumber, - queueSize); + LOG.info("Configured XMLRPC server threadpool: maxThreads={}, maxQueueSize={}", maxThreadNum, + maxQueueSize); } @@ -107,13 +93,13 @@ private void configureHandler(Server server) throws StoRMXmlRpcException { ServletContextHandler servletContextHandler = new ServletContextHandler(); servletContextHandler.addServlet(new ServletHolder(servlet), "/"); - Boolean isTokenEnabled = Configuration.getInstance().getXmlRpcTokenEnabled(); + Boolean isTokenEnabled = StormConfiguration.getInstance().getXmlRpcTokenEnabled(); if (isTokenEnabled) { LOG.info("Enabling security filter for XML-RPC requests"); - String token = Configuration.getInstance().getXmlRpcToken(); + String token = StormConfiguration.getInstance().getXmlRpcToken(); if (token == null 
|| token.isEmpty()) { diff --git a/src/test/java/it/grid/storm/balancer/cache/ResponsivenessCacheTest.java b/src/test/java/it/grid/storm/balancer/cache/ResponsivenessCacheTest.java index 9ff1f4979..306e92c1d 100644 --- a/src/test/java/it/grid/storm/balancer/cache/ResponsivenessCacheTest.java +++ b/src/test/java/it/grid/storm/balancer/cache/ResponsivenessCacheTest.java @@ -6,21 +6,27 @@ import static it.grid.storm.balancer.cache.Responsiveness.RESPONSIVE; import static it.grid.storm.balancer.cache.Responsiveness.UNRESPONSIVE; -import static it.grid.storm.config.Configuration.CONFIG_FILE_PATH; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - +import static org.junit.Assert.fail; +import java.io.IOException; +import org.apache.commons.configuration.ConfigurationException; import org.junit.Before; import org.junit.Test; - import it.grid.storm.balancer.BalancerUtils; import it.grid.storm.balancer.Node; +import it.grid.storm.config.StormConfiguration; public class ResponsivenessCacheTest extends BalancerUtils { static { - System.setProperty(CONFIG_FILE_PATH, "storm.properties"); + try { + StormConfiguration.init("src/test/resources/storm.properties"); + } catch (ConfigurationException | IOException e) { + e.printStackTrace(); + fail(); + } } private final ResponsivenessCache CACHE = ResponsivenessCache.INSTANCE; diff --git a/src/test/java/it/grid/storm/balancer/strategy/BalancingStrategiesTests.java b/src/test/java/it/grid/storm/balancer/strategy/BalancingStrategiesTests.java index cd87ee359..366a2acbd 100644 --- a/src/test/java/it/grid/storm/balancer/strategy/BalancingStrategiesTests.java +++ b/src/test/java/it/grid/storm/balancer/strategy/BalancingStrategiesTests.java @@ -4,7 +4,7 @@ */ package it.grid.storm.balancer.strategy; -import static it.grid.storm.config.Configuration.CONFIG_FILE_PATH; +import static it.grid.storm.config.StormConfiguration.CONFIG_FILE_PATH; import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; diff --git a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java index 635209472..193a98f4c 100644 --- a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java +++ b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java @@ -4,7 +4,7 @@ */ package it.grid.storm.tape.recalltable.resources; -import static it.grid.storm.config.Configuration.CONFIG_FILE_PATH; +import static it.grid.storm.config.StormConfiguration.CONFIG_FILE_PATH; import static it.grid.storm.tape.recalltable.resources.TaskInsertRequest.MAX_RETRY_ATTEMPTS; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.CREATED; @@ -25,7 +25,7 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response; - +import org.apache.commons.configuration.ConfigurationException; import org.junit.Test; import org.mockito.Mockito; @@ -34,6 +34,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.Lists; +import it.grid.storm.catalogs.TapeRecallCatalog; +import it.grid.storm.config.StormConfiguration; import it.grid.storm.griduser.VONameMatchingRule; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; @@ -46,7 +48,6 @@ import it.grid.storm.rest.metadata.service.ResourceService; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.tape.recalltable.TapeRecallCatalog; public class TaskResourceTest { @@ -69,7 +70,12 @@ public class TaskResourceTest { private TapeRecallCatalog BROKEN_RECALL_CATALOG = getTapeRecallCatalogInsertError(); static { - System.setProperty(CONFIG_FILE_PATH, "storm.properties"); + try { + StormConfiguration.init("src/test/resources/storm.properties"); + } catch 
(ConfigurationException | IOException e) { + e.printStackTrace(); + fail(); + } } private TapeRecallCatalog getTapeRecallCatalogInsertSuccess(UUID groupTaskId) {