Wednesday, 4 November 2015

HUGE ARCHIVE LOG GENERATION

Check points to work through when facing huge archive log generation issues (a quick SQL sketch for points 1 and 4 follows the note below).

1. Check jobs and schedules.
2. Check DB activity: SQL, tables, procedures, triggers.
3. Check session-wise redo (SQL redo) generation.
4. Check for full redo log groups / undersized redo log files.
5. Check DB link / distributed work.
6. Check session locks.
7. Check deadlocks.
8. Generate AWR/ASH/ADDM reports (see the Top 5 Wait Events and Segments by Physical/Logical activity sections of the report).

Note: some redo sources have no application impact (e.g. SMON redo generation, fragmentation).
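A minimal sketch for check points 1 and 4, assuming the querying user can read DBA_SCHEDULER_JOBS and V$LOG:

-- Check point 1: jobs/schedules that ran or are due around the problem window
select owner, job_name, state, last_start_date, next_run_date
  from dba_scheduler_jobs
 where enabled = 'TRUE'
 order by last_start_date desc;

-- Check point 4: online redo log group count and size
select group#, thread#, round(bytes/1024/1024) size_mb, members, status
  from v$log
 order by group#;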



***********************************************************************************


## Archive generation report:
Per-day volume in MB of archived logs generated, deleted, and not yet deleted by RMAN.

SELECT SUM_ARCH.DAY,
         SUM_ARCH.GENERATED_MB,
         SUM_ARCH_DEL.DELETED_MB,
         SUM_ARCH.GENERATED_MB - SUM_ARCH_DEL.DELETED_MB "REMAINING_MB"
    FROM (  SELECT TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY') DAY,
                   SUM (ROUND ( (blocks * block_size) / (1024 * 1024), 2))
                      GENERATED_MB
              FROM V$ARCHIVED_LOG
             WHERE ARCHIVED = 'YES'
          GROUP BY TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY')) SUM_ARCH,
         (  SELECT TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY') DAY,
                   SUM (ROUND ( (blocks * block_size) / (1024 * 1024), 2))
                      DELETED_MB
              FROM V$ARCHIVED_LOG
             WHERE ARCHIVED = 'YES' AND DELETED = 'YES'
          GROUP BY TO_CHAR (COMPLETION_TIME, 'DD/MM/YYYY')) SUM_ARCH_DEL
   WHERE SUM_ARCH.DAY = SUM_ARCH_DEL.DAY(+)
ORDER BY TO_DATE (DAY, 'DD/MM/YYYY');

***********************************************************************************

## Date & time wise log generation:

set pages 1000
select to_char(COMPLETION_TIME,'DD-MM-YYYY HH24:MI') Day_Time, thread#,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576) MB, count(*) Archives
  from v$archived_log
 group by to_char(COMPLETION_TIME,'DD-MM-YYYY HH24:MI'), thread#
 order by 1;

Note: From the output, identify the dates and times when archive logs are generated most frequently, then create an AWR/ASH report for that window (a sketch follows) and review the top wait events.
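A minimal sketch of generating the reports from SQL*Plus using the standard scripts shipped under $ORACLE_HOME/rdbms/admin: awrrpt.sql for the AWR report, ashrpt.sql for the ASH report and addmrpt.sql for ADDM findings. Each script prompts for the snapshot range or time window identified above.

SQL> @?/rdbms/admin/awrrpt.sql
SQL> @?/rdbms/admin/ashrpt.sql
SQL> @?/rdbms/admin/addmrpt.sql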

***********************************************************************************

##  Hour wise archive generation:

SELECT to_date(first_time) DAY,
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'999') "00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'999') "01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'999') "02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'999') "03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'999') "04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'999') "05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'999') "06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'999') "07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'999') "08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'999') "09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'999') "10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'999') "11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'999') "12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'999') "13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'999') "14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'999') "15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'999') "16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'999') "17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'999') "18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'999') "19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'999') "20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'999') "21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'999') "22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'999') "23", count(*) TOT
from
v$log_history
where to_date(first_time) > sysdate -8
GROUP by
to_char(first_time,'YYYY-MON-DD'), to_date(first_time)
order by to_date(first_time)
/

***********************************************************************************

##  Archive generation count and volume: day/hour wise

select trunc(COMPLETION_TIME,'DD') Day, thread#,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576/1024) GB, count(*) Archives_Generated
  from v$archived_log
 group by trunc(COMPLETION_TIME,'DD'), thread#
 order by 1;

select to_char(COMPLETION_TIME,'DD-MON-YY HH24') Hour,
       round(sum(BLOCKS*BLOCK_SIZE)/1048576/1024) GB, count(*) Archives
  from v$archived_log
 group by to_char(COMPLETION_TIME,'DD-MON-YY HH24')
 order by 1;

***********************************************************************************

##  Current session-wise redo generation:

select b.inst_id,
       lpad((b.SID || ',' || lpad(b.serial#,5)),11) sid_serial,
       b.username,
       machine,
       b.osuser,
       b.status,
       a.redo_mb
from (select n.inst_id, sid,
             round(value/1024/1024) redo_mb
        from gv$statname n, gv$sesstat s
        where n.inst_id=s.inst_id
              and n.name = 'redo size'
              and s.statistic# = n.statistic#
        order by value desc
     ) a,
     gv$session b
where b.inst_id=a.inst_id
  and a.sid = b.sid
and   rownum <= 30
;
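Once a high-redo session stands out in the output above, its currently executing statement can be pulled from GV$SESSION and GV$SQL; a small sketch (substitute the inst_id and sid reported by the previous query):

select s.inst_id, s.sid, s.serial#, s.sql_id, q.sql_text
  from gv$session s, gv$sql q
 where s.inst_id = q.inst_id(+)
   and s.sql_id  = q.sql_id(+)
   and s.sql_child_number = q.child_number(+)
   and s.inst_id = &inst_id
   and s.sid     = &sid;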

***********************************************************************************

##   Which segments are generating redo logs:

SELECT to_char(begin_interval_time,'YY-MM-DD HH24') snap_time,
        dhso.object_name,
        sum(db_block_changes_delta) BLOCK_CHANGED
  FROM dba_hist_seg_stat dhss,
       dba_hist_seg_stat_obj dhso,
       dba_hist_snapshot dhs
  WHERE dhs.snap_id = dhss.snap_id
    AND dhs.instance_number = dhss.instance_number
    AND dhss.obj# = dhso.obj#
    AND dhss.dataobj# = dhso.dataobj#
    AND begin_interval_time BETWEEN to_date('15-10-31 08:00','YY-MM-DD HH24:MI')
                                AND to_date('15-10-31 10:00','YY-MM-DD HH24:MI')
  GROUP BY to_char(begin_interval_time,'YY-MM-DD HH24'),
           dhso.object_name
  HAVING sum(db_block_changes_delta) > 0
ORDER BY sum(db_block_changes_delta) desc ;
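To see who owns a hot segment reported above and what kind of object it is, a quick lookup against DBA_OBJECTS can be used; a sketch (&object_name is the segment name from the previous query):

select owner, object_name, object_type
  from dba_objects
 where object_name = upper('&object_name');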

***********************************************************************************

##  Which SQL was causing the redo generation (filter the SQL text on the segment name identified above):

SELECT to_char(begin_interval_time,'YYYY_MM_DD HH24') WHEN,
       dbms_lob.substr(sql_text,4000,1) SQL,
       dhss.instance_number INST_ID,
       dhss.sql_id,
       executions_delta exec_delta,
       rows_processed_delta rows_proc_delta
  FROM dba_hist_sqlstat dhss,
       dba_hist_snapshot dhs,
       dba_hist_sqltext dhst
  WHERE upper(dhst.sql_text) LIKE '%USR_RACUNI_MV%'
    AND ltrim(upper(dhst.sql_text)) NOT LIKE 'SELECT%'
    AND dhss.snap_id=dhs.snap_id
    AND dhss.instance_number=dhs.instance_number
    AND dhss.sql_id=dhst.sql_id
    AND begin_interval_time BETWEEN to_date('15-10-31 07:00','YY-MM-DD HH24:MI')
                                AND to_date('15-10-31 16:00','YY-MM-DD HH24:MI');


***********************************************************************************

## Session-wise program redo entries generated:


col username for a15
col program for a20
col name for a20

select * from
(select /*+ first_rows */ a.username,a.sid,a.program,b.name,c.value
from v$session a,v$statname b,v$sesstat c
where a.sid=c.sid and b.statistic#=c.statistic# and b.name='redo entries' order by  c.value desc)
where rownum < 11
;


***********************************************************************************

## For RAC Env.
Author: Riyaj Shamsudeen

spool redosize.log
col begin_interval_time format a30
set lines 160 pages 1000
col end_interval_time format a30
set colsep '|'
alter session set nls_date_format='DD-MON-YYYY';
with redo_sz as (
SELECT  sysst.snap_id, sysst.instance_number, begin_interval_time ,end_interval_time ,  startup_time,
VALUE - lag (VALUE) OVER ( PARTITION BY  startup_time, sysst.instance_number
                ORDER BY begin_interval_time, startup_time, sysst.instance_number) stat_value,
EXTRACT (DAY    FROM (end_interval_time-begin_interval_time))*24*60*60+
            EXTRACT (HOUR   FROM (end_interval_time-begin_interval_time))*60*60+
            EXTRACT (MINUTE FROM (end_interval_time-begin_interval_time))*60+
            EXTRACT (SECOND FROM (end_interval_time-begin_interval_time)) DELTA
  FROM sys.wrh$_sysstat sysst , DBA_HIST_SNAPSHOT snaps
WHERE (sysst.dbid, sysst.stat_id) IN ( SELECT dbid, stat_id FROM sys.wrh$_stat_name WHERE  stat_name='redo size' )
AND snaps.snap_id = sysst.snap_id
AND snaps.dbid =sysst.dbid
AND sysst.instance_number=snaps.instance_number
and begin_interval_time > sysdate-90
)
select instance_number,
  to_date(to_char(begin_interval_time,'DD-MON-YYYY'),'DD-MON-YYYY') dt
, sum(stat_value) redo1
from redo_sz
group by  instance_number,
  to_date(to_char(begin_interval_time,'DD-MON-YYYY'),'DD-MON-YYYY')
order by instance_number, 2
/
spool off;

Saturday, 3 October 2015

How to truncate the audit table (SYS.AUD$).

Friends, recently I was trying to truncate the SYS.AUD$ table, but it would not let me do so because the application was running (I got a "resource busy" error), and I found that a lot of DML activity was inserting records into the audit table.

Use the steps below to truncate the audit table.
1) Check active and inactive sessions.
select status,username,last_call_et from v$session where username is not null;

2) Check for long-running queries.
col USERNAME for a15
Set lines 2000
set pages 200
col MODULE for a20
col SQL_TEXT for a75
col machine for a20
select distinct s.username, s.sid, s.serial#, round(s.last_call_et/60,2) "MINS_RUNNING",
       s.module, s.machine, q.sql_text
from v$session s
join v$sqlarea q
on s.sql_address = q.address
where s.status = 'ACTIVE'
and s.type <> 'BACKGROUND'
and s.last_call_et > 1800
and q.sql_text not like '%begin%'
order by s.sid, s.serial#;
 
3) Try to truncate the SYS.AUD$ table.
SQL> truncate table sys.aud$;
ERROR: ORA-00054: resource busy and acquire with NOWAIT ...

4) Inform the application team to shut down the application.

5) Shut down the database as well.
SQL> shutdown immediate;

6) Restart the database.
SQL> startup

7) Take an export backup of the audit table using the conventional exp utility.
$ exp file=EXP_AUD_TABLE_<SID><date>.dmp log=EXP_AUD_TABLE_<SID><date>.log tables=SYS.AUD$ buffer=1000000

The above command may take some time to export the table data.


8) Truncate the audit table to reclaim space.
SQL> conn / as sysdba
SQL> truncate table sys.aud$;
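Optionally, the AUD$ segment size can be checked before and after the truncate to confirm the space was released; a small sketch:

select owner, segment_name, round(bytes/1024/1024) size_mb
  from dba_segments
 where owner = 'SYS'
   and segment_name = 'AUD$';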


Inform the application team to start the application.

Thursday, 10 September 2015

CLUSTER PSU Patch

Applying CLUSTER PSU Patch 11.2.0.1.2 To A Two Node RAC

NOTE: This article is aimed at showing the issues and resolutions encountered while applying PSU patch 11.2.0.1.2 (9655006) to a specific environment; it is not generalized. Please refer to the readme of the PSU patch for the detailed procedure.
Video demos are also uploaded to show the errors and the successful installation of the PSU to the 2-node RAC.

1. Record Pre Patch Information.
Log in to each node in the RAC as the grid user and execute the following commands.
$GRID_ORACLE_HOME/OPatch/opatch lsinventory
$GRID_ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i 'GI PSU'
Log in to each node in the RAC as the oracle user and execute the following commands.
$ORACLE_HOME/OPatch/opatch lsinventory
$ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i 'DATABASE PSU'
Connect to each instance and record registry information.
SQL> select comp_name,version,status from dba_registry;

2. OPatch Utility Information.
$ORACLE_HOME/OPatch/opatch version -h /u01/home/oracle/product/11.2.0/db_1
$GRID_ORACLE_HOME/OPatch/opatch version -h /u01/home/11.2.0/grid

3. OCM Configuration.
Create ocm response file using the following command and provide appropriate values for the prompts.
$GRID_ORACLE_HOME/OPatch/ocm/bin/emocmrsp
Verify the created file using:
$GRID_ORACLE_HOME/OPatch/ocm/bin/emocmrsp -verbose ocm.rsp
NOTE: The OPatch utility will prompt for your OCM (Oracle Configuration Manager) response file when it is run; without it we cannot proceed further.
  

4. Validation of Oracle Inventory.
$GRID_ORACLE_HOME/OPatch/opatch lsinventory -detail -oh /u01/home/11.2.0/grid
$ORACLE_HOME/OPatch/opatch lsinventory -detail -oh /u01/home/oracle/product/11.2.0/db_1

5. One-off Patch Conflict Detection and Resolution.
NA

6. Download and Unzip the PSU Patch 9655006

$cd /u01/home/oracle/product/11.2.0/db_1/patches/psupatch
$unzip p9655006_11201_Linux.zip
$chmod -R 777 *

7. Patching GI Home

NOTE: If the GI home is shared, then make sure to shut down the GI stack on all remote nodes. Keep the GI stack up and running on the local node.
NOTE: If the GI home is not shared, then make sure the GI stack is running on all nodes in the cluster.
Our Grid home is not shared, so we don't shut down any services.

$ su -   ( log in as the root user )
#/u01/home/11.2.0/grid/OPatch/opatch auto /u01/home/oracle/product/11.2.0/db_1/patches/psupatch -oh /u01/home/11.2.0/grid

Monitor the logfile created in $GRID_ORACLE_HOME/cfgtoollogs/
Execute the above opatch command on each RAC node as root user.
** Please refer to the Issues & Resolutions section in this document for any issues.

8. Patching RAC Database Homes
All Oracle processes and applications (such as emconsole and emagent) that are running from the database home and that are not managed by clusterware should be stopped manually before you apply the patch using the opatch auto command.

$ su -   ( log in as the root user )
# /u01/home/oracle/product/11.2.0/db_1/OPatch/opatch auto /u01/home/oracle/product/11.2.0/db_1/patches/psupatch -oh /u01/home/oracle/product/11.2.0/db_1

Monitor the logfile created in $ORACLE_HOME/cfgtoollogs/
Execute the above opatch command on each RAC node as root user.
** Please refer to the Issues & Resolutions section in this document for any issues.


9. Loading Modified SQL Files into the Database.

For each database instance running on the Oracle home being patched, connect to the database using SQL*Plus. Connect as SYSDBA and run the catbundle.sql script as follows:

cd $ORACLE_HOME/rdbms/admin
sqlplus /nolog
SQL> CONNECT / AS SYSDBA
SQL> @catbundle.sql psu apply
SQL> QUIT
Check the log files in $ORACLE_HOME/cfgtoollogs/catbundle for any errors.

10. Patch Successful Verification Steps.
Log in to each node in the RAC as the grid user and execute the following command.
$GRID_ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i 'GI PSU'
Log in to each node in the RAC as the oracle user and execute the following command.
$ORACLE_HOME/OPatch/opatch lsinventory -bugs_fixed | grep -i 'DATABASE PSU'
Connect to each instance and record registry information.
SQL> select comp_name,version,status from dba_registry;
SQL> select * from dba_registry_history;



11. Issues & Resolutions.

Issue 1:
When applying the patch, you may get the following error
The opatch minimum version check for patch /u01/home/oracle/product/11.2.0/db_1/patches/9655006/custom failed for /u01/home/11.2.0/grid
The opatch minimum version check for patch /u01/home/oracle/product/11.2.0/db_1/patches/9655006/etc failed for /u01/home/11.2.0/grid
The opatch minimum version check for patch /u01/home/oracle/product/11.2.0/db_1/patches/9655006/files failed for /u01/home/11.2.0/grid
Opatch version check failed for oracle home /u01/home/11.2.0/grid
Opatch version check failed
update the opatch version for the failed homes and retry

Solution:
Ref Note: 1308858.1
We need to pass the base directory where the patch was unzipped, not the directory that includes the patch number.
Ex: /u01/home/oracle/product/11.2.0/db_1/patches/psupatch/9655006 ( wrong )
/u01/home/oracle/product/11.2.0/db_1/patches/psupatch ( correct )

Issue 2
Patch may exit with the following error messages
Unable to determine if /u01/home/11.2.0/grid is shared oracle home
Enter 'yes' if this is not a shared home or if the prerequisite actions are performed to patch this shared home (yes/no):yes
You must kill crs processes or reboot the system to properly
cleanup the processes started by Oracle clusterware
The Oracle Clusterware stack failed to stop.
You should stop the stack with 'crsctl stop crs' and rerun the command
The opatch Applicable check failed for /u01/home/11.2.0/grid. The patch is not applicable for /u01/home/11.2.0/grid
patch ././9655006 apply failed for home /u01/home/11.2.0/grid
Solution:
This error may be specific to this environment, but it is worth noting here. It occurs because some cluster resources still refer to a 10g database installed earlier. Delete those resources from the clusterware.
# crsctl delete resource <resource_name> -f


Issue 3 :
PSU patch for GRID home on node 2 failed with the following error
Unable to determine if /u01/home/11.2.0/grid is shared oracle home
Enter 'yes' if this is not a shared home or if the prerequisite actions are performed to patch this shared home (yes/no):yes
Successfully unlock /u01/home/11.2.0/grid
patch ././9655006 apply failed for home /u01/home/11.2.0/grid
Verified the detailed log file located in /u01/home/11.2.0/grid/cfgtoollogs/ and found a permission issue on some files.
The following actions have failed:
Copy failed from '/u01/home/oracle/product/11.2.0/db_1/patches/psupatch/9655006/files/bin/crsctl.bin' to '/u01/home/11.2.0/grid/bin/crsctl.bin'...
Copy failed from '/u01/home/oracle/product/11.2.0/db_1/patches/psupatch/9655006/files/bin/oifcfg.bin' to '/u01/home/11.2.0/grid/bin/oifcfg.bin'...

Solution:
Give 777 permissions to the crsctl.bin and oifcfg.bin files.

Issue 4 :
PSU Patch for RDBMS Home on node 1 failed with the following error
Unable to determine if /u01/home/oracle/product/11.2.0/db_1 is shared oracle home
Enter 'yes' if this is not a shared home or if the prerequisite actions are performed to patch this shared home (yes/no):yes
patch ././9655006/custom/server/9655006 apply failed for home /u01/home/oracle/product/11.2.0/db_1
Verified the detailed log file located in /u01/home/oracle/product/11.2.0/db_1/cfgtoollogs and found a platform issue.
Running prerequisite checks...
Prerequisite check "CheckPatchApplicableOnCurrentPlatform" failed.
The details are:
Patch ( 9655006 ) is not applicable on current platform.
Platform ID needed is : 46
Platform IDs supported by patch are: 226
UtilSession failed: Prerequisite check "CheckPatchApplicableOnCurrentPlatform" failed.
OPatch failed with error code 73

Solution:
Export OPATCH_PLATFORM_ID=226 and execute the PSU patch command again, as sketched below.
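A sketch of the workaround as a shell session, reusing the opatch auto command from step 8 (run as root):

# export OPATCH_PLATFORM_ID=226
# /u01/home/oracle/product/11.2.0/db_1/OPatch/opatch auto /u01/home/oracle/product/11.2.0/db_1/patches/psupatch -oh /u01/home/oracle/product/11.2.0/db_1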


Issue 5:
While applying Grid Infrastructure PSU (patch 9343627) to $GRID_HOME, opatch prerequisite check CheckSystemSpace failed:
Running prerequisite checks...
 Prerequisite check "CheckSystemSpace" failed.
 The details are:
 Required amount of space(4373569440) is not available.
 UtilSession failed: Prerequisite check "CheckSystemSpace" failed.
 OPatch failed with error code 73

Solution:
Follow note ID 1088455.1 and make sure there is enough free space on the GRID_HOME mount point.


Reference NOTE IDs: 1082394.1, 1308858.1, 1169036.1, 1290354.1, 1088455.1, 1210964.1

OPATCH PATCH IN RAC


1.    The worked example below will use the following settings:
CRS_HOME = /u01/crs/oracle/product/10/crs

2.    The patch being applied is p123456
               oracle:> export PATH=$PATH:$ORACLE_HOME/OPatch

               oracle:> which opatch
               /u01/crs/oracle/product/10/crs/OPatch/opatch

3.    Since each node of the cluster has its own CRS home, the patch should be applied as a rolling upgrade. All of the following
steps should be followed on each node.
Do not patch two nodes at once.

4.    As the Oracle Clusterware (CRS) software owner, check the CRS_HOME inventory.
oracle:> opatch lsinventory -detail -oh /u01/crs/oracle/product/10/crs
Invoking OPatch 10.2.0.3.0
...
  Remote node = <node x>
-------------------------------------------------------------------
 
OPatch succeeded.

·         The above should list the components and the list of nodes. If the Oracle inventory is not set up correctly, the OPatch utility will fail.

5.    Unzip the patch set container file, this will create one or more sub-directories.
      % unzip p123456.zip
 
      Archive:  p123456.zip
         creating: 123456/
      ...

6.    Shut down the RDBMS and ASM instances, listeners and nodeapps, followed by the CRS daemons, on the local node (see the note on listeners after the commands below).
·         To shutdown RDBMS instance on the local node run the following command:
% $ORACLE_HOME/bin/srvctl stop instance -d dbname -i instance_name
·         To shutdown ASM instances run the following command on each node:
         % $ORACLE_HOME/bin/srvctl stop asm -n <node_name>
·         To shutdown nodeapps run the following command on each node:
         % $ORACLE_HOME/bin/srvctl stop nodeapps -n <node_name>
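·         The nodeapps stop normally brings down the local listener as well; if a listener from this home is still running, it can be stopped explicitly (a sketch, assuming the 10.2 srvctl syntax):
         % $ORACLE_HOME/bin/srvctl stop listener -n <node_name>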


7.    Now shut down the CRS daemons on each node by running as root:
root # $CRS_HOME/bin/crsctl stop crs
Stopping resources. This could take several minutes.
Successfully stopped CRS resources.
Stopping CSSD.
Shutting down CSS daemon.
Shutdown request successfully issued.

8.    Prior to applying this part of the fix, invoke the unlock script as root to unlock protected files.
su -
 
root # cd <patch directory>/123456
root # custom/scripts/prerootpatch.sh -crshome  /u01/crs/oracle/product/10/crs -crsuser oracle
root # exit

9.    Now invoke an additional script as the crs software installer/owner. This script will save important configuration settings.
oracle:> cd <patch directory>/123456
custom/scripts/prepatch.sh -crshome /u01/crs/oracle/product/10/crs
custom/scripts/prepatch.sh completed successfully.


10. After unlocking any protected files and saving configuration settings, run opatch as the Oracle Clusterware (CRS) software owner.

cd <patch directory>/123456
oracle:> opatch apply -local -oh /u01/crs/oracle/product/10/crs
Invoking OPatch 10.2.0.3.0
 
Oracle interim Patch Installer version 10.2.0.3.0
Copyright (c) 2005, Oracle Corporation.  All rights reserved..
 
Oracle Home       : /u01/crs/oracle/product/10/crs
Central Inventory : /u01/app/oracle/oraInventory
   from           : /var/opt/oracle/oraInst.loc
OPatch version    : 10.2.0.3.0
OUI version       : 10.2.0.3.0
OUI location      : /u01/crs/oracle/product/10/crs/oui
Log file location :/u01/crs/oracle/product/10/crs/cfgtoollogs/opatch/opatch2008-02-8_11-51-38AM.log
 
ApplySession applying interim patch '123456' to OH '/u01/crs/oracle/product/10/crs'
ApplySession: Optional component(s) [ oracle.rdbms, 10.2.0.3.0 ]  
 not present in the Oracle Home or a higher version is found.
Invoking fuser to check for active processes.
 
You selected -local option, hence OPatch will patch the local system only.
 
Please shutdown Oracle instances running out of this ORACLE_HOME on the local system.
(Oracle Home = '/u01/crs/oracle/product/10/crs')
 
Is the local system ready for patching?
 
Do you want to proceed? [y|n]
y
User Responded with: Y
Backing up files and inventory (not for auto-rollback) for the Oracle Home
Backing up files affected by the patch '123456' for restore. This might take a while...
Backing up files affected by the patch '123456' for rollback. This might take a while...
 
Patching component oracle.rdbms.rsf, 10.2.0.3.0...
Updating archive file "/u01/crs/oracle/product/10/crs/lib/libgeneric10.a"  with "lib/libgeneric10.a/skgfr.o"
Updating archive file "/u01/crs/oracle/product/10/crs/lib32/libgeneric10.a"  with "lib32/libgeneric10.a/skgfr.o"
ApplySession adding interim patch '123456' to inventory
 
Verifying the update...
Inventory check OK: Patch ID 123456 is registered in Oracle Home inventory with proper meta-data.
Files check OK: Files from Patch ID 123456 are present in Oracle Home.
Running make for target client_sharedlib
 
The local system has been patched and can be restarted.
 
OPatch succeeded.




11. Restore the security settings and restart CRS by running the following as root:

su -
 
Sourcing /root/.profile-EIS.....
root # cd <patch directory>/123456
root # custom/scripts/postrootpatch.sh -crshome /u01/crs/oracle/product/10/crs
Checking to see if Oracle CRS stack is already up...
Checking to see if Oracle CRS stack is already starting
WARNING: directory '/u01/crs/oracle/product/10' is not owned by root
WARNING: directory '/u01/crs/oracle/product' is not owned by root
WARNING: directory '/u01/crs/oracle' is not owned by root
Startup will be queued to init within 30 seconds.
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)

PATCH DEINSTALLATION INSTRUCTIONS
To roll back the patch, invoke the following opatch command in each home.
% opatch rollback -id 123456 -local -oh <CRS_HOME>