Monday, 19 December 2016

CURRENT SESSION


# script to find redo generated by current sessions

set lines 2000
set pages 1000
col sid for 99999
col name for a09
col username for a14
col PROGRAM for a21
col MODULE for a25
select s.sid, sn.serial#, n.name, round(s.value/1024/1024,2) redo_mb, sn.username, sn.status,
       substr(sn.program,1,21) "program", sn.type, sn.module, sn.sql_id
from v$sesstat s
     join v$statname n on n.statistic# = s.statistic#
     join v$session sn on sn.sid = s.sid
where n.name like 'redo size'
  and s.value != 0
order by redo_mb desc;



# script to check INACTIVE sessions with HIGH DISK IO

select p.spid,s.username, s.sid,s.status,t.disk_reads, s.last_call_et/3600 last_call_et_Hrs,
s.action,s.program,s.machine cli_mach,s.process cli_process,lpad(t.sql_text,30) "Last SQL"
from gv$session s, gv$sqlarea t,v$process p
where s.sql_address =t.address and
s.sql_hash_value =t.hash_value and
p.addr=s.paddr and
t.disk_reads > 5000
and s.status='INACTIVE'
-- and s.process='1234'      /* optional: filter on a specific client process */
order by S.PROGRAM;


# script to analyze disk I/O

prompt SESSIONS PERFORMING HIGH I/O (disk_reads > 10000)

select p.spid, s.sid,s.process cli_process, s.status,t.disk_reads, s.last_call_et/3600 last_call_et_Hrs,
s.action,s.program,lpad(t.sql_text,30) "Last SQL"
from v$session s, v$sqlarea t,v$process p
where s.sql_address =t.address and
s.sql_hash_value =t.hash_value and
p.addr=s.paddr and
t.disk_reads > 10000
order by t.disk_reads desc;
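
The "Last SQL" column above is truncated to 30 characters. To see the complete statement for a session of interest, one option (a sketch that reuses the same joins; substitute the SID from the listing) is:

select t.sql_fulltext
from v$session s, v$sqlarea t
where s.sql_address = t.address
and s.sql_hash_value = t.hash_value
and s.sid = &sid;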

Friday, 18 November 2016

Rolling a Standby Forward using an RMAN Incremental Backup To Fix The Nologging Changes

---- Example error in Standby DB ------

ORA-01578: ORACLE data block corrupted (file # <n>, block # <n>)
ORA-01110: data file <n>: '<datafile name>'
ORA-26040: Data block was loaded using the NOLOGGING option

-----------------------------------------------------------------------------------------

--- Standby (backup server) ---

col FIRST_NONLOGGED_SCN format  999999999999999
SELECT FILE#, FIRST_NONLOGGED_SCN FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN > 0 ;

     FILE# FIRST_NONLOGGED_SCN
---------- -------------------
         4       2229616324279
         6       2229616324282
         7       2229616324285
         8       2229486974087
         9       2229486973946
        10       2229486973946
        11       2229486973954
        19       2229616324228
        20       2229616324228
        21       2229616324231
        22       2229486973959
        24       2229486974422
        25       2229616324273
        27       2229486974437
        28       2229486974454
        29       2229616324276



 ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;   -- stop managed recovery
 ALTER DATABASE DATAFILE 4 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 6 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 7 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 8 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 9 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 10 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 11 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 19 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 20 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 21 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 22 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 24 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 25 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 27 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 28 OFFLINE FOR DROP;
 ALTER DATABASE DATAFILE 29 OFFLINE FOR DROP;








-- PRIMARY --
BACKUP INCREMENTAL FROM SCN 2229616324279 DATAFILE 4 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324282 DATAFILE 6 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324285 DATAFILE 7 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486974087 DATAFILE 8 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486973946 DATAFILE 9 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486973946 DATAFILE 10 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486973954 DATAFILE 11 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324228 DATAFILE 19 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324228 DATAFILE 20 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324231 DATAFILE 21 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486973959 DATAFILE 22 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486974422 DATAFILE 24 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324273 DATAFILE 25 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486974437 DATAFILE 27 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229486974454 DATAFILE 28 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';
BACKUP INCREMENTAL FROM SCN 2229616324276 DATAFILE 29 FORMAT '/u04/rbackup/ForStandby_%U' TAG 'FOR STANDBY';


scp the backup pieces from the primary to the standby (backup) server, for example:
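
A minimal sketch (the host name and destination path are assumptions, adjust to your environment):

scp /u04/rbackup/ForStandby_* oracle@standbyhost:/u04/rbackup/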


-- Standby (backup server) --

Connect with RMAN to the standby and catalog the copied backup pieces:

RMAN> CATALOG START WITH '/u04/rbackup/ForStandby_';

 ALTER DATABASE DATAFILE 4 online;
 ALTER DATABASE DATAFILE 6 online;
 ALTER DATABASE DATAFILE 7 online;
 ALTER DATABASE DATAFILE 8 online;
 ALTER DATABASE DATAFILE 9 online;
 ALTER DATABASE DATAFILE 10 online;
 ALTER DATABASE DATAFILE 11 online;
 ALTER DATABASE DATAFILE 19 online;
 ALTER DATABASE DATAFILE 20 online;
 ALTER DATABASE DATAFILE 21 online;
 ALTER DATABASE DATAFILE 22 online;
 ALTER DATABASE DATAFILE 24 online;
 ALTER DATABASE DATAFILE 25 online;
 ALTER DATABASE DATAFILE 27 online;
 ALTER DATABASE DATAFILE 28 online;
 ALTER DATABASE DATAFILE 29 online;


- recover DATAFILE 4,6,7,8,9,10,11,19,20,21,22,24,25,27,28,29; # For manual standby 

or

- recover DATAFILE 4,6,7,8,9,10,11,19,20,21,22,24,25,27,28,29 NOREDO;


- Now resume recovery on the standby database and verify the fix (see the sketch below).
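
A sketch of the finishing steps on the standby (the managed-recovery command assumes a physical standby; re-running the V$DATAFILE check from the top of this post confirms the nologged blocks are gone):

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;

SQL> SELECT FILE#, FIRST_NONLOGGED_SCN FROM V$DATAFILE WHERE FIRST_NONLOGGED_SCN > 0;

no rows selected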


For more detail, see the reference doc ID below.

Reference :

Rolling a Standby Forward using an RMAN Incremental Backup To Fix The Nologging Changes [ID 958181.1]

Saturday, 5 November 2016

CleanUp Temporary Segments Occupying Permanent Tablespace


>  Alert Log

Hex dump of (file 4, block 1310651) in trace file /u01/app/oracle/admin/sarathi/bdump/sarathi_smon_3309.trc
Corrupt block relative dba: 0x0113ffbb (file 4, block 1310651)
Bad header found during buffer read
Data in bad block:
 type: 40 format: 2 rdba: 0x01038a9c
 last change scn: 0x0000.00077dc3 seq: 0x2 flg: 0x04
 spare1: 0x0 spare2: 0x0 spare3: 0x0
 consistency value in tail: 0x00000000
 check value in block header: 0x7f68
 computed block checksum: 0x0
Reread of rdba: 0x0113ffbb (file 4, block 1310651) found same corrupted data
Corrupt Block Found
         TSN = 4, TSNAME = USERS
         RFN = 4, BLK = 1310635, RDBA = 18087851
         OBJN = -1, OBJD = 52382, OBJECT = , SUBOBJECT =
         SEGMENT OWNER = , SEGMENT TYPE =
Hex dump of (file 4, block 1310635) in trace file /u01/app/oracle/admin/sarathi/bdump/sarathi_smon_3309.trc
Corrupt block relative dba: 0x0113ffab (file 4, block 1310635)
Bad header found during buffer read
Data in bad block:
 type: 40 format: 2 rdba: 0x01038a9c
 last change scn: 0x0000.00077dc3 seq: 0x2 flg: 0x04
 spare1: 0x0 spare2: 0x0 spare3: 0x0
 consistency value in tail: 0x00000000
 check value in block header: 0x7f68
 computed block checksum: 0x0
Reread of rdba: 0x0113ffab (file 4, block 1310635) found same corrupted data
Hex dump of (file 4, block 1310643) in trace file /u01/app/oracle/admin/sarathi/bdump/sarathi_smon_3309.trc
Corrupt block relative dba: 0x0113ffb3 (file 4, block 1310643)
Bad header found during buffer read
Data in bad block:
 type: 40 format: 2 rdba: 0x01038a9c
 last change scn: 0x0000.00077dc3 seq: 0x2 flg: 0x04
 spare1: 0x0 spare2: 0x0 spare3: 0x0
 consistency value in tail: 0x00000000
 check value in block header: 0x7f68
 computed block checksum: 0x0
Reread of rdba: 0x0113ffb3 (file 4, block 1310643) found same corrupted data
Hex dump of (file 4, block 1310651) in trace file /u01/app/oracle/admin/sarathi/bdump/sarathi_smon_3309.trc
Corrupt block relative dba: 0x0113ffbb (file 4, block 1310651)
Bad header found during buffer read
Data in bad block:
 type: 40 format: 2 rdba: 0x01038a9c
 last change scn: 0x0000.00077dc3 seq: 0x2 flg: 0x04
 spare1: 0x0 spare2: 0x0 spare3: 0x0
 consistency value in tail: 0x00000000
 check value in block header: 0x7f68
 computed block checksum: 0x0
Reread of rdba: 0x0113ffbb (file 4, block 1310651) found same corrupted data
Corrupt Block Found
         TSN = 4, TSNAME = USERS
         RFN = 4, BLK = 1310643, RDBA = 18087859
         OBJN = -1, OBJD = 52383, OBJECT = , SUBOBJECT =
         SEGMENT OWNER = , SEGMENT TYPE =


>  Some workarounds that did not succeed
 
SQL> select *
from dba_extents
where file_id = &DATA_FILE_ID
and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1;  2    3    4
Enter value for data_file_id: 4
old   3: where file_id = &DATA_FILE_ID
new   3: where file_id = 4
Enter value for corrupted_block_id: 1310643
old   4: and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1
new   4: and 1310643 between block_id AND block_id + blocks - 1

no rows selected


SQL> select *
from dba_extents
where file_id = &DATA_FILE_ID
and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1;  2    3    4
Enter value for data_file_id: 4
old   3: where file_id = &DATA_FILE_ID
new   3: where file_id = 4
Enter value for corrupted_block_id: 1310635
old   4: and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1
new   4: and 1310635 between block_id AND block_id + blocks - 1

no rows selected


SQL> select *
from dba_extents
where file_id = &DATA_FILE_ID
and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1;  2    3    4
Enter value for data_file_id: 4
old   3: where file_id = &DATA_FILE_ID
new   3: where file_id = 4
Enter value for corrupted_block_id: 1310651
old   4: and &CORRUPTED_BLOCK_ID between block_id AND block_id + blocks - 1
new   4: and 1310651 between block_id AND block_id + blocks - 1

no rows selected



select header_file, header_block,segment_name from dba_segments where header_file=4;

HEADER_FILE HEADER_BLOCK SEGMENT_NAME
----------- ------------ ----------------------------------------
          4      1310635 4.1310635
          4      1310643 4.1310643
          4      1310651 4.1310651



select owner,segment_name,segment_type,header_file,header_block,extents,(bytes/1024/1024)MB 
from dba_segments
where segment_type = 'TEMPORARY' and tablespace_name = 'USERS';



SOLUTION: 
 
==============================
USE PACKAGE DBMS_SPACE_ADMIN 
==============================

select header_file, header_block,segment_name from dba_segments where header_file=4;


select tablespace_name, owner, segment_name, sum(bytes/1024/1024) from dba_segments
where segment_type = 'TEMPORARY' group by tablespace_name, owner;


--------------------------------------------------------------------
execute DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310651);
exec DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310643);
execute DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310635);
--------------------------------------------------------------------

ERROR at line 1:
ORA-03211: The segment does not exist or is not in a valid state
ORA-06512: at "SYS.DBMS_SPACE_ADMIN", line 46
ORA-06512: at line 1

---------------------------------------------------------------------

execute dbms_space_admin.segment_corrupt('USERS',4,1310651);
execute dbms_space_admin.segment_corrupt('USERS',4,1310643);
execute dbms_space_admin.segment_corrupt('USERS',4,1310635);


--------------------------------------------------------------------

execute DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310651);
exec DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310643);
execute DBMS_SPACE_ADMIN.SEGMENT_DROP_CORRUPT ('USERS',4,1310635);

--------------------------------------------------------------------
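
Note (not shown in the original run): per the DBMS_SPACE_ADMIN documentation, SEGMENT_DROP_CORRUPT does not itself release the space of the dropped segment. If the freed space does not reappear in the tablespace, something like the following may also be needed (verify against the docs for your version):

execute DBMS_SPACE_ADMIN.TABLESPACE_REBUILD_BITMAPS('USERS');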


select owner,segment_name,segment_type,header_file,header_block,extents,(bytes/1024/1024)MB 
from dba_segments
where segment_type = 'TEMPORARY' and tablespace_name = 'USERS';

no rows selected


select header_file, header_block,segment_name from dba_segments where header_file=4;

no rows selected





Reference :


Why does this issue happen, and why is the segment name reported as a numeric value (file.block)? See the reference link.


Tuesday, 1 November 2016

ORACLE EXA-DATA SIMULATION SETUP


Use VirtualBox / VMware for the Exa-simulation.
Cell software 11.2.3.2.1 : 64-bit > V36290-01.zip
GI + DB = 11.2.0.4
OS: first test whether the protocols below are working:
rds, rds_tcp, rds_rdma
I tested on RHEL/OEL ( > 5.10 and 6.x )
The points below are discussed and configured:
1.     Install an Oracle Linux / RHEL release that supports the rds, rds_tcp and rds_rdma protocols (because we are not using InfiniBand switch hardware)
2.     Add the storage cell software
3.     Prepare some virtual disks
Installing Oracle Linux

1.       Minimum 2 GB RAM and a 40 GB HDD are required, for illustration only (Exa-simulation on a laptop).
2.       1 NIC only (host-only adapter) for the storage cell. If you want Internet access on this machine, add one more NIC with a bridged connection.
3.       Complete the OS installation.
4.       Disable SELinux and the firewall.
Add storage cell software

[root@storagecell ]# vi /etc/hosts

[root@storagecell ]# more /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1       storagecell     storagecell.localdomain localhost.localdomain localhost
::1             localhost6.localdomain6 localhost6

Put the cell software image zip (V36290-01.zip) in a host-system path chosen as a shared folder so it can be used directly inside the VM. In fact there is a special file system mounted:

[root@storagecell ~]# mount
OS on /home type vboxsf (gid=160,rw)
[root@storagecell ~]# cd /home/
[root@storagecell ~]# ll
total 1506576
-rwxrwx--- 1 root vboxsf 1542729472 Oct 10 22:25 V36290-01.zip

Uncompress the file

[root@storagecell ]# unzip V36290-01.zip
Archive:  V36290-01.zip
inflating: README.txt
inflating: cellImageMaker_11.2.3.2.1_LINUX.X64_130109-1.x86_64.tar

And then untar

[root@storagecell ]# tar pxvf cellImageMaker_11.2.3.2.1_LINUX.X64_130109-1.x86_64.tar

(there are some errors due to permissions on the host folder, but that is not a problem because the only file needed is cell.bin)

[root@storagecell cellbits]# ll dl180/boot/cellbits/
total 1446844
-rwxrwx--- 1 root vboxsf       729 Jan  9  2013 c7rpms.tbz
-rwxrwx--- 1 root vboxsf 245231205 Jan  9  2013 cell.bin
-rwxrwx--- 1 root vboxsf  12705374 Jan  9  2013 cellboot.tbz
-rwxrwx--- 1 root vboxsf 141444416 Jan  9  2013 cellfw.tbz
-rwxrwx--- 1 root vboxsf 142434203 Jan  9  2013 cellrpms.tbz
-rwxrwx--- 1 root vboxsf 208612489 Jan  9  2013 commonos.tbz
-rwxrwx--- 1 root vboxsf 375683818 Jan  9  2013 debugos.tbz
-rwxrwx--- 1 root vboxsf  55704927 Jan  9  2013 doclib.zip
-rwxrwx--- 1 root vboxsf 199485158 Jan  9  2013 exaos.tbz
-rwxrwx--- 1 root vboxsf  18186084 Jan  9  2013 hputils.tbz
-rwxrwx--- 1 root vboxsf  53387742 Jan  9  2013 kernel.tbz
-rwxrwx--- 1 root vboxsf  16165382 Jan  9  2013 ofed.tbz
-rwxrwx--- 1 root vboxsf  12485584 Jan  9  2013 sunutils.tbz

Then copy cell.bin into the working directory and remove everything else

[root@storagecell ]# cp dl180/boot/cellbits/cell.bin .
[root@storagecell ]# ll cell.bin
total 1746060
-rwxrwx--- 1 root vboxsf  245231205 Dec 12 10:43 cell.bin
-rwxrwx--- 1 root vboxsf 1542729472 Oct 10 22:25 V36290-01.zip

Now unzip cell.bin

[root@storagecell ]# unzip cell.bin
Archive:  cell.bin
warning [cell.bin]:  6408 extra bytes at beginning or within zipfile
(attempting to process anyway)
inflating: cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64.rpm
inflating: jdk-1_5_0_15-linux-amd64.rpm

Install jdk
[root@storagecell sf_OS]# rpm -ivh jdk-1_5_0_15-linux-amd64.rpm
Preparing…                ########################################### [100%]
1:jdk                    ########################################### [100%]

Preparing to install cell rpm
[root@storagecell ]# mkdir /var/log/oracle
[root@storagecell ]# chmod 775 /var/log/oracle

(It will be used also by celladmin user …)
Install cell sw
[root@storagecell ]# rpm -ivh cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64.rpm
Preparing…                ########################################### [100%]
Pre Installation steps in progress …
1:cell                   ########################################### [100%]
Post Installation steps in progress …
Set cellusers group for /opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/cellsrv/deploy/log directory
Set 775 permissions for /opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/cellsrv/deploy/log directory
/
/
Installation SUCCESSFUL.
Starting RS and MS… as user celladmin
Done. Please Login as user celladmin and create cell to startup CELLSRV to complete cell configuration.
WARNING: Using the current shell as root to restart cell services.
Restart the cell services using a new shell.


Prepare some virtual disks


In 12c, use at least a 1 GB file size for each disk.

1.      With the storage cell software installed, some disks must be created.
2.      Shut down the machine and, in the VirtualBox Settings, create all the disks: 12 of size 500M (data) and 6 of size 400M (flash).
3.      After the storage cell software install and after adding the disks, some changes in the GNOME environment produce strange behaviour.
4.      Switch to a failsafe session > click on Session, then choose the Failsafe option.
5.      Log in again in an xterm.
6.      Edit /etc/bashrc:
[root@storagecell]# vi /etc/bashrc
export DISPLAY=:0
7.      Save, exit and log in again.
Now identify disks
[root@storagecell ~]# fdisk -l 2>/dev/null | grep "B,"
Disk /dev/sda: 26.8 GB, 26843545600 bytes
Disk /dev/sdb: 524 MB, 524288000 bytes
Disk /dev/sdc: 524 MB, 524288000 bytes
Disk /dev/sdd: 524 MB, 524288000 bytes
Disk /dev/sde: 524 MB, 524288000 bytes
Disk /dev/sdf: 524 MB, 524288000 bytes
Disk /dev/sdg: 524 MB, 524288000 bytes
Disk /dev/sdh: 524 MB, 524288000 bytes
Disk /dev/sdi: 524 MB, 524288000 bytes
Disk /dev/sdj: 524 MB, 524288000 bytes
Disk /dev/sdk: 524 MB, 524288000 bytes
Disk /dev/sdl: 524 MB, 524288000 bytes
Disk /dev/sdm: 524 MB, 524288000 bytes
Disk /dev/sdn: 419 MB, 419430400 bytes
Disk /dev/sdo: 419 MB, 419430400 bytes
Disk /dev/sdp: 419 MB, 419430400 bytes
Disk /dev/sdq: 419 MB, 419430400 bytes
Disk /dev/sdr: 419 MB, 419430400 bytes
Disk /dev/sds: 419 MB, 419430400 bytes

Looking at the environment of the new celladmin user, an interesting variable can be found that defines where the cellsrv process will search for disks:
[root@storagecell unix]# echo $T_WORK
/opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/disks

Under that path a new folder (raw) is needed, and symbolic links to the "real" disks should be created:
cd /opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/disks

mkdir raw

cd raw
ln -s /dev/sdb storagecell_DISK01
ln -s /dev/sdc storagecell_DISK02
ln -s /dev/sdd storagecell_DISK03
ln -s /dev/sde storagecell_DISK04
ln -s /dev/sdf storagecell_DISK05
ln -s /dev/sdg storagecell_DISK06
ln -s /dev/sdh storagecell_DISK07
ln -s /dev/sdi storagecell_DISK08
ln -s /dev/sdj storagecell_DISK09
ln -s /dev/sdk storagecell_DISK10
ln -s /dev/sdl storagecell_DISK11
ln -s /dev/sdm storagecell_DISK12
ln -s /dev/sdn storagecell_FLASH01
ln -s /dev/sdo storagecell_FLASH02
ln -s /dev/sdp storagecell_FLASH03
ln -s /dev/sdq storagecell_FLASH04
ln -s /dev/sdr storagecell_FLASH05
ln -s /dev/sds storagecell_FLASH06

Some Changes and Errors:
Add/Set  in /etc/sysctl.conf
fs.file-max = 65536

Edit the /etc/security/limits.conf file and add/set

* soft nofile 65536
* hard nofile 65536

To communicate over InfiniBand, Oracle uses the RDS protocol. All the RDS modules must be loaded (and configured to be loaded again after machine restarts; a sketch for making this persistent follows the modprobe commands below).

[root@storagecell ~]# lsmod |grep rds*
rdma_cm                73429  2 rds_rdma,ib_iser
ib_cm                  72041  1 rdma_cm
iw_cm                  43593  1 rdma_cm
ib_sa                  76489  2 rdma_cm,ib_cm
ib_core               108097  7 rds_rdma,ib_iser,rdma_cm,ib_cm,iw_cm,ib_sa,ib_mad
ib_addr                42697  1 rdma_cm
ipv6                  438369  3 rdma_cm,ib_addr,cnic

[root@storagecell ~]# modprobe rds
[root@storagecell ~]# modprobe rds_tcp
[root@storagecell ~]# modprobe rds_rdma
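
The modprobe commands above load the modules only for the current boot. To have them loaded again after a restart, one option on RHEL/OEL 6 is an executable file under /etc/sysconfig/modules (the file name below is an assumption):

[root@storagecell ~]# cat /etc/sysconfig/modules/rds.modules
#!/bin/sh
# load the RDS modules needed by the cell software at every boot
for m in rds rds_tcp rds_rdma; do
    /sbin/modprobe "$m" >/dev/null 2>&1
done
[root@storagecell ~]# chmod +x /etc/sysconfig/modules/rds.modules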
[root@storagecell ~]# su - celladmin

[celladmin@storagecell ~]$ cellcli -e alter cell restart services all
CELL-01509: Restart Server (RS) not responding.
Starting the RS, CELLSRV, and MS services…
Getting the state of RS services… running
Starting CELLSRV services…
The STARTUP of CELLSRV services was not successful.
CELL-01547: CELLSRV startup failed due to unknown reasons.
Starting MS services…
The STARTUP of MS services was successful.

The error is not unknown (as stated) but well known and expected: [Required IP parameters missing]
So, set the interconnect (= the InfiniBand connection):
[celladmin@storagecell ~]$ cellcli -e create cell storagecell interconnect1=eth0
Cell stocell1 successfully created
Starting CELLSRV services…
The STARTUP of CELLSRV services was successful.
Flash cell disks, FlashCache, and FlashLog will be created…
CellDisk FD_00_stocell1 successfully created
CellDisk FD_01_stocell1 successfully created
CellDisk FD_02_stocell1 successfully created
CellDisk FD_03_stocell1 successfully created
CellDisk FD_04_stocell1 successfully created
CellDisk FD_05_stocell1 successfully created
Flash log stocell1_FLASHLOG successfully created
Flash cache stocell1_FLASHCACHE successfully created

(I’m not sure why Flash components are auto configured, but it can be modified later if needed)
Configure cell disks
[celladmin@storagecell ~]$ cellcli -e create celldisk all
CellDisk CD_DISK01_stocell1 successfully created
CellDisk CD_DISK02_stocell1 successfully created
CellDisk CD_DISK03_stocell1 successfully created
CellDisk CD_DISK04_stocell1 successfully created
CellDisk CD_DISK05_stocell1 successfully created
CellDisk CD_DISK06_stocell1 successfully created
CellDisk CD_DISK07_stocell1 successfully created
CellDisk CD_DISK08_stocell1 successfully created
CellDisk CD_DISK09_stocell1 successfully created
CellDisk CD_DISK10_stocell1 successfully created
CellDisk CD_DISK11_stocell1 successfully created
CellDisk CD_DISK12_stocell1 successfully created

and grid disks
[celladmin@storagecell ~]$ cellcli -e create griddisk all harddisk prefix=DATA
GridDisk DATA_CD_DISK01_stocell1 successfully created
GridDisk DATA_CD_DISK02_stocell1 successfully created
GridDisk DATA_CD_DISK03_stocell1 successfully created
GridDisk DATA_CD_DISK04_stocell1 successfully created
GridDisk DATA_CD_DISK05_stocell1 successfully created
GridDisk DATA_CD_DISK06_stocell1 successfully created
GridDisk DATA_CD_DISK07_stocell1 successfully created
GridDisk DATA_CD_DISK08_stocell1 successfully created
GridDisk DATA_CD_DISK09_stocell1 successfully created
GridDisk DATA_CD_DISK10_stocell1 successfully created
GridDisk DATA_CD_DISK11_stocell1 successfully created
GridDisk DATA_CD_DISK12_stocell1 successfully created

>  Now configure another machine for the GI setup.

Note: some commands and scripts do not work on this fake-hardware Exadata simulation (e.g. imageinfo, imagehistory, patching, etc.).


Special thanks to my friends Skant Gupta and Vj sharma for helping complete this doc.

Wednesday, 28 September 2016

RMAN real usage of MAXSETSIZE, MAXPIECESIZE, FILESPERSET, SECTION SIZE, MAXOPENFILES



http://www.dbarj.com.br/en/2015/02/rman-real-usage-maxsetsize-maxpiecesize-filesperset-section-size-maxopenfiles/

http://oracleinaction.com/tune-rman-i/
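
For quick reference, a minimal RMAN sketch showing where each of these settings is specified (the values are illustrative only, not recommendations):

CONFIGURE MAXSETSIZE TO 10G;            # upper limit on the size of any one backup set

RUN
{
  # channel-level limits: maximum backup piece size and files read concurrently per set
  ALLOCATE CHANNEL c1 DEVICE TYPE DISK MAXPIECESIZE 2G MAXOPENFILES 8;
  # at most 4 datafiles per backup set
  BACKUP AS COMPRESSED BACKUPSET DATABASE FILESPERSET 4;
  # multisection backup of a single large datafile
  BACKUP DATAFILE 5 SECTION SIZE 1G;
}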

You are trying to install the 64-bit Cell rpm but a 32-bit JDK is installed. You should remove the 32-bit JDK rpm and use the cell binary to install the 64-bit JDK and the 64 bit CELL

Some workarounds to fix this issue.

Below is the error I got when installing the cell rpm (after extracting cell.bin and installing the jdk):


***************************************************************************************

[root@storagecell mnt]# rpm -ivh cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64.rpm
Preparing...                ########################################### [100%]
You are trying to install the 64-bit Cell rpm but a 32-bit JDK is installed. You should remove the 32-bit JDK rpm and use the cell binary to install the 64-bit JDK and the 64 bit CELL
error: %pre(cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64) scriptlet failed, exit status 4
error:   install: %pre scriptlet failed (2), skipping cell-11.2.3.2.1_LINUX.X64_130109-1

***************************************************************************************

[root@storagecell ~]# rpm -qa | grep jdk
jdk-1.5.0_15-fcs.x86_64
java-1.6.0-openjdk-devel-1.6.0.0-1.39.1.9.7.el6
java-1.6.0-openjdk-1.6.0.0-1.39.1.9.7.el6

***************************************************************************************
-----------
Solution:
-----------

RH6 "rpm -q" gives output in a different format from RH5.
That produces an error when the pre-install script checks the installed rpm.

A .rpmmacros file in root's home directory should fix the problem.

Create .rpmmacros in root's home and change the query format.

[root@storagecell ~]# vim .rpmmacros

[root@storagecell ~]# rpm -qa | grep jdk
jdk-1.5.0_15-fcs
java-1.6.0-openjdk-devel-1.6.0.0-1.39.1.9.7.el6
java-1.6.0-openjdk-1.6.0.0-1.39.1.9.7.el6


[root@stocell1 ~]# cat .rpmmacros
%_query_all_fmt %%{name}-%%{version}-%%{release}



[root@storagecell mnt]# rpm -ivh cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64.rpm
Preparing...                ########################################### [100%]
Pre Installation steps in progress ...
ping: unknown host storagecell.localdomain
Check hostname setting. Cannot install cell
error: %pre(cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64) scriptlet failed, exit status 5
error:   install: %pre scriptlet failed (2), skipping cell-11.2.3.2.1_LINUX.X64_130109-1

[root@storagecell mnt]# vim /etc/hosts

[root@storagecell mnt]# cat /etc/hosts
127.0.0.1   storagecell storagecell.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

[root@storagecell mnt]# rpm -ivh cell-11.2.3.2.1_LINUX.X64_130109-1.x86_64.rpm
Preparing...                ########################################### [100%]
Pre Installation steps in progress ...
   1:cell                   ########################################### [100%]
Post Installation steps in progress ...
Set cellusers group for /opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/cellsrv/deploy/log directory
Set 775 permissions for /opt/oracle/cell11.2.3.2.1_LINUX.X64_130109/cellsrv/deploy/log directory
/
/
Installation SUCCESSFUL.
Starting RS and MS... as user celladmin
Done. Please Login as user celladmin and create cell to startup CELLSRV to complete cell configuration.
WARNING: Using the current shell as root to restart cell services.
Restart the cell services using a new shell.

[root@storagecell mnt]#

Monday, 19 September 2016

RECOVER ORACLE DATAFILES WITH NO DOWNTIME


What happens if a dbf file is accidentally deleted while the database is still open, and how do we recover it?

Interviewers ask this question a lot (the first method is shown here; the second method is to restore from an RMAN backup). Anyway, to the point, here we go.....

On Unix/Linux, when a file is deleted while a process still has it open, the file data is still there in the filesystem; only its directory entry (the name) is removed.
The process can continue to use its file handle, and the file is also accessible under /proc/<pid>/fd .

In the following example, we use that behaviour to recover a lost datafile after it has been dropped from the OS (with rm) while the datafile is still open by the background processes.

First, we create a tablespace, and populate a table in it.

----------------------------
Create a tablespace: 
----------------------------

SQL> create tablespace TEST datafile '/u01/app/oracle/oradata/orcl/test01.dbf' size 10M;
Tablespace created.

-------------------------------------
Create a table in tablespace: 
-------------------------------------

SQL> create table ORATAB tablespace TEST as select * from dba_objects;
Table created.

----------------------------------------------
Check that table data is accessible: 
----------------------------------------------

SQL> select count(*) from ORATAB;

  COUNT(*)
----------
     71902

SQL> exit

Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production

-----------------------------------------------------------
Then, we remove the datafile from unix CLI.
-----------------------------------------------------------
here is the datafile 

ls -l /u01/app/oracle/oradata/orcl/test01.dbf
-rw-r----- 1 oracle dba 10493952 Sep 19 15:21 /u01/app/oracle/oradata/orcl/test01.dbf

we 'accidentally' remove the datafile 

rm /u01/app/oracle/oradata/orcl/test01.dbf

ls -l /u01/app/oracle/oradata/orcl/test01.dbf

ls: /u01/app/oracle/oradata/orcl/test01.dbf: no such file or directory

--------------------------------
Here the datafile is lost.
Now we connect again.
-------------------------------

sqlplus / as sysdba

Connected to:
Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production

-------------------------------------------------------------------------------------------
Check whether the table data is still accessible; at some point you will get the error below: 
-------------------------------------------------------------------------------------------

SQL> select count(*) from ORATAB;

select count(*) from ORATAB
*
ERROR at line 1:
ORA-01116: error in opening database file 5
ORA-01110: data file 5: '/u01/app/oracle/oradata/orcl/test01.dbf'
ORA-27041: unable to open file
Linux Error: 2: No such file or directory
Additional information: 3

--------------------------------------------------------------
The datafile is lost and data is not accessible.
--------------------------------------------------------------

--------------------------------------------------------------------------------------------------------------------------
However, the datafile should still have an open file descriptor held by an Oracle background process
--------------------------------------------------------------------------------------------------------------------------

Check the dbwriter pid: 

ps -edf | grep dbw

[oracle@host01 ~]$ ps -edf | grep dbw
oracle    6350     1  0 13:09 ?        00:00:01 ora_dbw0_orcl
oracle    8237  8212  0 15:20 pts/1    00:00:00 grep dbw


List the deleted file handles for that DBWRITER process.
ls -l /proc/_PID_OF_DBWRITER/fd | grep deleted

or

Check its opened file descriptors for our file: 
[oracle@host01 ~]$ ls -l /proc/6350/fd | grep test
lrwx------ 1 oracle oinstall 64 Sep 19 15:21 24 -> /u01/app/oracle/oradata/orcl/test01.dbf (deleted)

here it is: 
[oracle@host01 ~]$ ls -l /proc/6350/fd/24
lrwx------ 1 oracle oinstall 64 Sep 19 15:21 /proc/6350/fd/24 -> /u01/app/oracle/oradata/orcl/test01.dbf (deleted)

On some other Unix flavours, lsof may be needed to map the file descriptor to the deleted file name, for example:
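
A sketch of that lookup on this box (the PID is the dbwriter found above; the exact output format varies by platform):

[oracle@host01 ~]$ lsof -p 6350 | grep -i deleted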

First we create a symbolic link so that Oracle can see the file as it was before the delete:
    ln -s /proc/6350/fd/24 /u01/app/oracle/oradata/orcl/test01.dbf

Here the data is accessible, but it will be lost if dbwriter closes its file handle (i.e. if the database is shut down).

----------------------------------------------------------------------------------------------------------------------------
However, we can now set the tablespace read only so that it is checkpointed and no writes occur on it.
----------------------------------------------------------------------------------------------------------------------------
SQL> alter tablespace TEST read only;
Tablespace altered.

Now copy the file safely.

then we drop the symbolic link: 
     rm /u01/app/oracle/oradata/orcl/test01.dbf
     ls -l /u01/app/oracle/oradata/orcl/test01.dbf

     ls: /u01/app/oracle/oradata/orcl/test01.dbf: No such file or directory

and we can now copy the file 
    cp -p /proc/6350/fd/24 /u01/app/oracle/oradata/orcl/test01.dbf
    ls -l /u01/app/oracle/oradata/orcl/test01.dbf

-rw-r----- 1 oracle dba 10493952 Sep 19 14:54 /u01/app/oracle/oradata/orcl/test01.dbf

And datafile is now available again.

-----------------------------------------------------------------------
We have it back, let's put the tablespace back in read/write 
------------------------------------------------------------------------
SQL> alter tablespace TEST read write;
Tablespace altered.

-------------------------------
Check data is still there: 
-------------------------------
SQL> select count(*) from ORATAB;

  COUNT(*)
----------
     71902

This is not to be used like that in production. This is unsupported and may behave differently on different unix/linux or oracle flavours.

Monday, 29 August 2016

Data Guard Archive Tracing



- You can trace the archive logs on either the primary or the standby database by setting the log_archive_trace initialization parameter

 alter system set log_archive_trace=8;

- Trace files are written to the user_dump_dest location.

- On the primary database, the log_archive_trace parameter controls the output of the 
ARCn (archiver),
FAL (fetch archived log), and the
LGWR (log writer) background processes.
On the standby databases, it traces the work of the
ARCn,
RFS (remote file server), and the
FAL processes.

- You can specify any of 17 levels of archive log tracing.

 0: Disables archivelog tracing (default)
 1: Tracks archival of redo log file
 2: Tracks archival status of each archivelog destination
 4: Tracks archival operational phase
 8: Tracks archivelog destination activity
 16: Tracks detailed archivelog destination activity
 32: Tracks archivelog destination parameter modifications
 64: Tracks ARCn process state activity
 128: Tracks FAL (fetch archived log) server related activities
 256: Tracks RFS Logical Client
 512: Tracks LGWR redo shipping network activity
 1024: Tracks RFS Physical Client
 2048: Tracks RFS/ARCn Ping Heartbeat
 4096: Tracks Real Time Apply
 8192: Tracks Redo Apply (Media Recovery or Physical Standby)
 16384: Tracks redo transport buffer management

 32768: Tracks LogMiner dictionary

The trace levels are additive bit masks: for example, if you specify level 17, the trace file will include the information from levels 16 and 1.
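
So the levels can be combined by adding their values. For example, to trace archivelog destination activity (8) together with FAL server activity (128) you could set 136 (a sketch; the SCOPE/SID clauses are optional and require an spfile):

 alter system set log_archive_trace=136 scope=both sid='*';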

kupprdp: master process DM00 started


Alert log:
=======

kupprdp: master process DM00 started with pid=42, OS id=7353
         to execute - SYS.KUPM$MCP.MAIN('SYS_EXPORT_TABLE_01', 'S2', 'KUPC$C_1_20160821122303', 'KUPC$S_1_20160821122303', 0);
kupprdp: worker process DW01 started with worker id=1, pid=25, OS id=7355
         to execute - SYS.KUPW$WORKER.MAIN('SYS_EXPORT_TABLE_01', 'S2');

Solutions.
========


SQL> select job_name,state from dba_datapump_jobs;

JOB_NAME                       STATE
------------------------------ ------------------------------
SYS_EXPORT_FULL_02             EXECUTING
SYS_EXPORT_FULL_01             NOT RUNNING

 A correction on my instructions above:

- Just take note of the job_name and owner values.
- In SQL*Plus, execute DBMS_DATAPUMP.STOP_JOB as follows:
exec DBMS_DATAPUMP.STOP_JOB (DBMS_DATAPUMP.ATTACH('JOB_NAME','OWNER'), 1, 0);
- '1' (immediate) aborts the job immediately
- '0' (keep_master) removes the job from the job list so that it is not restartable

SQL> exec DBMS_DATAPUMP.STOP_JOB (DBMS_DATAPUMP.ATTACH('SYS_EXPORT_SCHEMA_01','SYSTEM'), 1, 0);
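
If a job stays in the NOT RUNNING state with no session attached (like SYS_EXPORT_FULL_01 in the listing above), its master table may be left behind in the owning schema. A possible cleanup, assuming the owner turns out to be SYSTEM (verify it first), is:

SQL> select owner_name, job_name, state from dba_datapump_jobs;
SQL> drop table SYSTEM.SYS_EXPORT_FULL_01 purge;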

COMPRESSED INCREMENTAL BACKUP

COMPRESSED INCREMENTAL ONLINE BACKUP PLUS ARCHIVELOG FILES AND AUTO DELETE OF ARCHIVELOG


INCREMENTAL LEVEL 0 BACKUP

RUN
{
BACKUP AS COMPRESSED BACKUPSET INCREMENTAL LEVEL 0
DATABASE PLUS ARCHIVELOG;
DELETE NOPROMPT ARCHIVELOG ALL BACKED UP 1 TIMES TO DISK;
}


INCREMENTAL LEVEL 1 BACKUP

RUN
{
BACKUP AS COMPRESSED BACKUPSET INCREMENTAL LEVEL 1
DATABASE PLUS ARCHIVELOG;
DELETE NOPROMPT ARCHIVELOG ALL BACKED UP 1 TIMES TO DISK;
}




Saturday, 27 August 2016

SQL Tuning Advisor (Manually)

First generate and review an ADDM/ASH/AWR report.
Step 0) In order to execute the SQL Tuning Advisor API, one must be granted the ADVISOR role.
grant advisor to <user>;

Step 1) The first step in using the SQL Tuning Advisor is to create a tuning task using DBMS_SQLTUNE.CREATE_TUNING_TASK.
-- for a specific statement from AWR

SET SERVEROUTPUT ON

-- Tuning task created for a specific statement from the AWR.
DECLARE
  l_sql_tune_task_id  VARCHAR2(100);
BEGIN
  l_sql_tune_task_id := DBMS_SQLTUNE.create_tuning_task (
                          begin_snap  => 764,
                          end_snap    => 938,
                          sql_id      => '19v5guvsgcd1v',
                          scope       => DBMS_SQLTUNE.scope_comprehensive,
                          time_limit  => 60,
                          task_name   => '19v5guvsgcd1v_AWR_tuning_task',
                          description => 'Tuning task for statement 19v5guvsgcd1v in AWR.');
  DBMS_OUTPUT.put_line('l_sql_tune_task_id: ' || l_sql_tune_task_id);
END;
/

Step 2) -- or for a specific statement from the shared library cache

DECLARE
  l_sql_tune_task_id  VARCHAR2(100);
BEGIN
  l_sql_tune_task_id := DBMS_SQLTUNE.create_tuning_task (
                          sql_id      => 'fhahkc71k304u',
                          scope       => DBMS_SQLTUNE.scope_comprehensive,
                          time_limit  => 60,
                          task_name   => 'fhahkc71k304u_tuning_task',
                          description => 'Tuning task for statement fhahkc71k304u.');
  DBMS_OUTPUT.put_line('l_sql_tune_task_id: ' || l_sql_tune_task_id);
END;
/

Step 3) Manage the tuning task. (A task can also be created for a statement given manually, via the sql_text parameter of CREATE_TUNING_TASK.)
During the execution phase you may wish to pause and restart the task, cancel it, or reset it so that it can be re-executed:
Interrupt and resume a tuning task.
EXEC DBMS_SQLTUNE.interrupt_tuning_task (task_name => '19v5guvsgcd1v_AWR_tuning_task');
EXEC DBMS_SQLTUNE.resume_tuning_task (task_name => '19v5guvsgcd1v_AWR_tuning_task');

-- Cancel a tuning task.
EXEC DBMS_SQLTUNE.cancel_tuning_task (task_name => '19v5guvsgcd1v_AWR_tuning_task');

-- Reset a tuning task allowing it to be re-executed.
EXEC DBMS_SQLTUNE.reset_tuning_task (task_name => '19v5guvsgcd1v_AWR_tuning_task');

------------------------------------------------------------------------------------------
Step 4) Create a tuning task from AWR
------------------------------------------------------------------------------------------
Get the snap IDs for today:

sys@goldprod> select SNAP_ID, BEGIN_INTERVAL_TIME from dba_hist_snapshot where BEGIN_INTERVAL_TIME > trunc(sysdate) order by snap_id;

   SNAP_ID BEGIN_INTERVAL_TIME
---------- ---------------------------------------------------------------------------
      4042 28-APR-11 12.00.29.410 AM
      4043 28-APR-11 01.01.01.094 AM
      4044 28-APR-11 02.00.32.909 AM
      4045 28-APR-11 03.00.07.558 AM
      4046 28-APR-11 04.00.40.121 AM
      4047 28-APR-11 05.00.14.894 AM
      4048 28-APR-11 06.00.59.123 AM
      4049 28-APR-11 07.00.23.056 AM
      4050 28-APR-11 08.00.51.205 AM
      4051 28-APR-11 09.00.19.892 AM
      4052 28-APR-11 10.00.35.227 AM
      4053 28-APR-11 11.00.02.168 AM
      4054 28-APR-11 12.00.37.690 PM
      4055 28-APR-11 01.00.09.106 PM

DECLARE
  l_sql_tune_task_id  VARCHAR2(100);
BEGIN
  l_sql_tune_task_id := DBMS_SQLTUNE.create_tuning_task (
                          begin_snap  => 4042,
                          end_snap    => 4055,
                          sql_id      => 'fhahkc71k304u',
                          scope       => DBMS_SQLTUNE.scope_comprehensive,
                          time_limit  => 60,
                          task_name   => 'fhahkc71k304u_AWR_tuning_task',
                          description => 'Tuning task for statement fhahkc71k304u in AWR.');
  DBMS_OUTPUT.put_line('l_sql_tune_task_id: ' || l_sql_tune_task_id);
END;
/

PL/SQL procedure successfully completed.

Step 5) execute the tuning task

EXEC DBMS_SQLTUNE.execute_tuning_task(task_name => 'fhahkc71k304u_AWR_tuning_task');


Step 6) report tuning task findings

SET LONG 100000;
SET PAGESIZE 1000
SET LINESIZE 300
SELECT DBMS_SQLTUNE.report_tuning_task('fhahkc71k304u_AWR_tuning_task') AS recommendations FROM dual;
SET PAGESIZE 24

Step 7) accept recommendations

execute dbms_sqltune.accept_sql_profile(task_name =>'fhahkc71k304u_AWR_tuning_task', replace => TRUE);

Step 8) Verify that the SQL profile is being used.
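
A minimal sketch of that verification (column names are from DBA_SQL_PROFILES and V$SQL; the sql_id is the one tuned above):

SET LINESIZE 200
SELECT name, status, created FROM dba_sql_profiles ORDER BY created DESC;

-- after the statement runs again, v$sql shows whether the profile was picked up
SELECT sql_id, child_number, plan_hash_value, sql_profile
FROM v$sql
WHERE sql_id = 'fhahkc71k304u';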