Monday 14 August 2017

DNS SERVER

Configuring DNS without chroot on RHEL/OEL for Oracle RAC


[root@dns1 ~]# rpm -qa | grep -i bind
ypbind-1.19-12.el5_6.1
kdebindings-3.5.4-6.el5
bind-utils-9.3.6-20.P1.el5_8.6
system-config-bind-4.0.3-6.0.1.el5
bind-libs-9.3.6-20.P1.el5_8.6
bind-9.3.6-20.P1.el5_8.6

[root@dns1 ~]# hostname
dns1.testenv.com



----- Create/Edit /etc/named.conf File ------

[root@dns1 ~]# cat /etc/named.conf
options {
        listen-on port 53 { 192.168.2.200; };
        directory "/var/named";
        dump-file "/var/named/data/cache_dump.db";
        statistics-file "/var/named/data/named_stats.txt";
        /*
         * If there is a firewall between you and nameservers you want
         * to talk to, you might need to uncomment the query-source
         * directive below.  Previous versions of BIND always asked
         * questions using port 53, but BIND 8.1 uses an unprivileged
         * port by default.
         */
         // query-source address * port 53;

        // My Additions
        // Forwarder: Anything this DNS can't resolve gets forwarded to my ISPs DNS.
        #forwarders { 194.168.4.100; 194.168.8.100; };
        // End My Additions
};

zone "testenv.com." IN {
                 type master;
                 file "testenv.com.zone";
                 allow-update { none; };
};

zone "2.168.192.in-addr.arpa." IN {
        type master;
        file "2.168.192.in-addr.arpa";
        allow-update { none; };
};

include "/etc/rndc.key";
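
Before (re)starting named, it is worth checking that this configuration parses cleanly. A quick sanity check, assuming the named-checkconf utility shipped with the bind package is available:

named-checkconf /etc/named.conf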




----------- Create/Edit /var/named/2.168.192.in-addr.arpa File ----------------
[root@dns1 named]# pwd
/var/named
[root@dns1 named]# cat 2.168.192.in-addr.arpa
$ORIGIN 2.168.192.in-addr.arpa.
$TTL 1H
@          IN     SOA    testenv.com.  root.testenv.com. (
                         42 ; serial (d. adams)
                         3H ; refresh
                        15M ; retry
                         1W ; expiry
                         1D ) ; minimum

2.168.192.in-addr.arpa.   IN   NS     testenv.com.
151 IN PTR rac1.testenv.com.
152 IN PTR rac2.testenv.com.
161 IN PTR rac1-vip.testenv.com.
162 IN PTR rac2-vip.testenv.com.
51  IN PTR rac-scan.testenv.com.
52  IN PTR rac-scan.testenv.com.
53  IN PTR rac-scan.testenv.com.


------------------ Create/Edit /var/named/testenv.com.zone File --------

[root@dns1 named]# cat testenv.com.zone
$TTL    86400
@               IN SOA  testenv.com root.testenv.com (
                                        42              ; serial (d. adams)
                                        3H              ; refresh
                                        15M             ; retry
                                        1W              ; expiry
                                        1D )            ; minimum
                IN NS   testenv.com
rac1            IN A    192.168.2.151
rac2            IN A    192.168.2.152
rac1-priv       IN A    192.168.3.151
rac2-priv       IN A    192.168.3.152
rac1-vip        IN A    192.168.2.161
rac2-vip        IN A    192.168.2.162
rac-scan        IN A    192.168.2.51
rac-scan        IN A    192.168.2.52
rac-scan        IN A    192.168.2.53
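
Both zone files can also be validated before use. A small sketch, assuming the zone names and file locations shown above:

named-checkzone testenv.com /var/named/testenv.com.zone
named-checkzone 2.168.192.in-addr.arpa /var/named/2.168.192.in-addr.arpa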



-------- Set the Ownership and Enable the Service --------

cd /var/named
chown named:named 2.168.192.in-addr.arpa
chown named:named testenv.com.zone

chkconfig named on
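
Then start (or restart) the named service and confirm it is running; a minimal sketch for a SysV-init system such as RHEL/OEL 5:

service named restart
service named status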

[root@dns1 named]# cat /etc/resolv.conf
nameserver 192.168.2.200



--------- Check Whether It Is Working ----------

[root@dns1 named]# nslookup rac-scan
Server:         192.168.2.200
Address:        192.168.2.200#53

Name:   rac-scan.testenv.com
Address: 192.168.2.53
Name:   rac-scan.testenv.com
Address: 192.168.2.51
Name:   rac-scan.testenv.com
Address: 192.168.2.52


[root@dns1 named]# dig testenv.com

; <<>> DiG 9.3.6-P1-RedHat-9.3.6-20.P1.el5_8.6 <<>> testenv.com
;; global options:  printcmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 50541
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 0

;; QUESTION SECTION:
;testenv.com.                   IN      A

;; AUTHORITY SECTION:
testenv.com.            86400   IN      SOA     testenv.com.testenv.com. root.testenv.com.testenv.com. 42 10800 900 604800 86400

;; Query time: 3 msec
;; SERVER: 192.168.2.200#53(192.168.2.200)
;; WHEN: Wed Jul  5 11:31:57 2017
;; MSG SIZE  rcvd: 82
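
Individual forward and reverse records can be checked directly on the DNS server as well. For example (these lookups are illustrative, not taken from the original output):

dig rac1.testenv.com
dig -x 192.168.2.151
nslookup 192.168.2.51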


---------------------------------------------------------------------------------------------------------------------------
=============
Now RAC Part
=============


Configure one node first; if it is working well, then follow the same steps on all nodes.

[root@rac1 ~]# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1               localhost.localdomain localhost
::1             localhost6.localdomain6 localhost6
#Public-IP
192.168.2.151  rac1  rac1.testenv.com
192.168.2.152  rac2  rac2.testenv.com

#Virtual-IP
192.168.2.161  rac1-vip      rac1-vip.testenv.com
192.168.2.162  rac2-vip      rac2-vip.testenv.com

#Scan-IP
192.168.2.51    rac-scan        rac-scan.testenv.com
192.168.2.52    rac-scan        rac-scan.testenv.com
192.168.2.53    rac-scan        rac-scan.testenv.com

192.168.2.200   dns1    dns1.testenv.com


[root@rac1 ~]# hostname
rac1.testenv.com


[root@rac1 ~]# cat /etc/resolv.conf
search testenv.com      dns1.testenv.com
nameserver 192.168.2.200


[root@rac1 ~]# dig rac1

; <<>> DiG 9.3.6-P1-RedHat-9.3.6-16.P1.el5 <<>> rac1
;; global options:  printcmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: SERVFAIL, id: 63859
;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0

;; QUESTION SECTION:
;rac1.                          IN      A

;; Query time: 4 msec
;; SERVER: 192.168.2.200#53(192.168.2.200)
;; WHEN: Wed Jul  5 11:36:32 2017
;; MSG SIZE  rcvd: 22



[root@rac1 ~]# dig rac-scan

; <<>> DiG 9.3.6-P1-RedHat-9.3.6-16.P1.el5 <<>> rac-scan
;; global options:  printcmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: SERVFAIL, id: 359
;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0

;; QUESTION SECTION:
;rac-scan.                      IN      A

;; Query time: 13 msec
;; SERVER: 192.168.2.200#53(192.168.2.200)
;; WHEN: Wed Jul  5 11:36:05 2017
;; MSG SIZE  rcvd: 26
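
Note that the SERVFAIL responses above are expected for bare short names: unlike nslookup, dig does not apply the resolv.conf search list by default. Querying the fully qualified names (or adding +search) should return the records; for example:

dig rac1.testenv.com
dig rac-scan.testenv.com
dig +search rac-scan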



Once the name lookups return the expected addresses, replicate the above settings on all remaining nodes.




Data Guard Flow



PHYSICAL STANDBY DATA FLOW
Let us understand how the data flows in a Data Guard setup by walking through Points 1 to 8 (as marked in the diagram above):

Point 1) On the primary database, a transaction starts. All the buffer cache locks (exclusive locks) required for the transaction are acquired.

Point 2) On the primary database, the redo blocks that describe the changes (change vectors) are generated and stored in the process's Program Global Area (PGA). After successfully acquiring the redo allocation latch, space is allocated in the redo log buffer, and the redo is then copied from the process's PGA into the redo log buffer.

Point 3) On the primary database, the Oracle foreground process tells the LGWR to flush the redo log buffers to disk. Remember that the database blocks have not yet been updated with the DML changes. The LGWR flushes the redo buffers to the online redo logs (ORLs) and acknowledges the completion to the session. At this point, the transaction is persistent on disk. No commit has occurred thus far.


At some future time, the database buffers that were previously changed will be written to disk by the database writer process (DBWR) at checkpoint time. This point is not marked in the above diagram.
Note that before the DBWR process flushes the database buffers to disk, the LGWR process must already have written the redo buffers to disk. This sequence is enforced by the write-ahead logging protocol.
Also, the ARCH process on the primary database archives the ORLs into archive log files. This point is also not marked in the above diagram.


Point 4) On the primary database, the LNS process reads the recently flushed redo from the redo log buffer and sends the redo data to the standby database using the redo transport destination (LOG_ARCHIVE_DEST_n) that we defined during standby database creation. Because we are using the ASYNC transport method, the LGWR does not wait for any acknowledgment from the LNS for this network send operation. The LGWR does not communicate with the LNS except to start it up at database startup and after a failure of a standby connection.
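
As a rough illustration of the redo transport destination mentioned above, an ASYNC destination is typically configured on the primary along these lines (a sketch only; 'stby' is a placeholder Oracle*Net alias / DB_UNIQUE_NAME, not taken from the original post):

sqlplus -s / as sysdba <<'EOF'
-- ship redo asynchronously to the standby identified by the placeholder alias "stby"
ALTER SYSTEM SET log_archive_dest_2=
  'SERVICE=stby ASYNC NOAFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=stby'
  SCOPE=BOTH;
EOF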

Point 5) On the standby database, the RFS process reads the redo stream from the network socket into the network buffers and then writes this redo stream to the SRLs.

Point 6) On the standby database, the ARCH process archives the SRLs into archive log files when a log switch occurs at the primary database. The generated archive log file is then registered with the standby control file.

The redo apply flow involves three distinct phases, as follows:

Point 7) On the standby database, the actual recovery process starts from this step. The managed recovery process (MRP) asynchronously reads ahead the redo from the SRLs or the archived redo logs (when recovery falls behind or is not in real-time apply mode). The blocks that require redo apply are parsed out and placed into appropriate in-memory map segments.

Point 8) On the standby database, the MRP process ships redo to the recovery slaves using the parallel query (PQ) interprocess communication framework. Parallel media recovery (PMR) causes the required data blocks to be read into the buffer cache, and redo is subsequently applied to these buffer cache buffers.
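
For reference, the MRP described in Points 7 and 8 is normally started on the standby with something like the following (a sketch, assuming real-time apply with standby redo logs; run as SYSDBA on the standby):

sqlplus -s / as sysdba <<'EOF'
-- start managed recovery with real-time apply and return control to the session
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION;
EOF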


During the checkpoint phase, the recently modified buffers (modified by the parallel recovery slaves) are flushed to disk, and the datafile headers are updated to record checkpoint completion.


PHYSICAL STANDBY DATABASE RELATED PROCESSES
All of the important processes are circled in the diagram above.

On the Primary Database:

LGWR : The log writer process flushes log buffers from the SGA to Online Redo Log files.

LNS : The LogWriter Network Service (called LNS in releases before 12c) reads the redo being flushed from the redo buffers by the LGWR and sends it over the network to the standby database. The main purpose of the LNS process is to free the LGWR process from performing the redo transport role.

ARCH : The archiver (ARCH) processes archive the ORL files to archive log files. Up to 30 ARCH processes can exist, and these ARCH processes are also used to fulfill gap resolution requests. Note that one ARCH process has a special role in that it is dedicated to local redo log archiving only and never communicates with a standby database.
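
The number of archiver processes the instance may start is controlled by the log_archive_max_processes parameter; a quick way to check it (assuming SQL*Plus access as SYSDBA):

sqlplus -s / as sysdba <<'EOF'
-- shows the current upper limit on ARCn processes for this instance
SHOW PARAMETER log_archive_max_processes
EOF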

On the Standby Database:

RFS : The main objective of the Remote File Server process is to perform a network receive of the redo transmitted from the primary site and then write the network buffer (redo data) to the standby redo log (SRL) files.

ARCH : The archive processes on the standby site perform the same functions performed on the primary site, except that on the standby site, an ARCH process generates archived log files from the SRLs.

MRP : The managed recovery process coordinates media recovery management. Remember that a physical standby is in perpetual recovery mode.
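
To see these standby-side processes (RFS, ARCH, MRP) and what they are doing, the v$managed_standby view can be queried on the standby; a small sketch:

sqlplus -s / as sysdba <<'EOF'
-- one row per Data Guard related process, with the redo thread/sequence it is working on
SELECT process, status, thread#, sequence#, block# FROM v$managed_standby;
EOF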

Basically, we can categorize the physical standby database architecture into three major components:

1) Data Guard Redo Transport Services
– To transfer the redo that is generated by the primary database to the standby database.
Points 4 and 5 in the above diagram are where redo transport works.

2) Data Guard Apply Services
– To receive and apply the redo sent by Redo Transport Services to the standby database.
Points 7 and 8 in the above diagram are where redo apply works.

3) Data Guard Role Management Services
– To assist in the database role changes in switchover and failover scenarios.
This service works in the background and takes care of switchover/failover scenarios.



GAP RESOLUTION FLOW


How is a gap between the primary and the standby resolved?

When the MRP finds that an archive log is missing during media recovery, it sends the fal_client information to the server identified by fal_server and requests the FAL server to resend the file. The fal_client and fal_server init.ora (spfile) parameters are set on the standby instance.

The Oracle docs note that fal_server specifies the FAL (fetch archive log) server for a standby database. The value for fal_server is an Oracle*Net service name, which is assumed to be configured properly on the standby database system to point to the desired FAL server.


1.       The fetch archive log (FAL) client is the MRP process.  The fetch archive log (FAL) server is a foreground process that runs on the primary database and services the fetch archive log requests coming from the FAL client.  A separate FAL server is created for each incoming FAL client. 
2.       The fal_server and fal_client values are Oracle*Net service names that identify the FAL server (primary) and the FAL client (standby) to each other.
3.       The gap is resolved automatically when you define the fal_client and fal_server parameters.
4.       In the above diagram, the background processes that resolve the gap automatically are ARCH (on the primary) sending to RFS (on the standby).
5.       If you do not set these parameters, you must manually transfer the archive logs from the primary to the standby, register them on the standby, and then perform recovery (see the sketch below).
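
A minimal sketch of the FAL setup and of manual registration, run on the standby as SYSDBA ('prim' and 'stby' are placeholder Oracle*Net aliases, and the archive log path is hypothetical):

sqlplus -s / as sysdba <<'EOF'
-- Oracle*Net alias of the primary (FAL server) and of this standby (FAL client)
ALTER SYSTEM SET fal_server='prim' SCOPE=BOTH;
ALTER SYSTEM SET fal_client='stby' SCOPE=BOTH;
-- if an archive log had to be copied across manually, register it before recovery
-- ALTER DATABASE REGISTER LOGFILE '/u01/arch/1_1234_987654321.arc';
EOF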