...


The following figure illustrates the WRTC HA Architecture:

Figure: HA Architecture

For example, if Node 2 goes down, the users connected to Node 2 are rehydrated through Node 3 or Node 1 in a round-robin manner.
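As a rough illustration of this round-robin selection, consider the following minimal Python sketch. The node names, session IDs, and the rehydration_targets helper are hypothetical, for illustration only; they are not part of the WRTC software.

Code Block
# Minimal sketch of round-robin rehydration target selection.
# Node names and session IDs are hypothetical, for illustration only.
from itertools import cycle

NODES = ["Node 1", "Node 2", "Node 3"]

def rehydration_targets(failed_node, sessions):
    """Assign each session of the failed node to a surviving node,
    cycling through the survivors in round-robin order."""
    survivors = cycle(n for n in NODES if n != failed_node)
    return {session: next(survivors) for session in sessions}

# Example: Node 2 goes down while serving three sessions.
print(rehydration_targets("Node 2", ["sess-a", "sess-b", "sess-c"]))
# {'sess-a': 'Node 1', 'sess-b': 'Node 3', 'sess-c': 'Node 1'}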

 

...

  1. Create a cluster in the EMS server with the required cluster configuration. Refer to Configuring WRTC Using EMS.
  2. Register each WRTC node to the cluster with a unique VNF ID. For example, if you are registering three nodes (Node 1, Node 2, and Node 3), each node must have a unique VNF ID and a unique IP address, and all nodes must share the same Cluster ID.
  3. Configure the WRTC load balancer to distribute traffic across the WRTC upstream servers. If you are using NGINX as the load balancer, refer to WRTC Load Balancer Configuration.

  4. The following is a sample NGINX configuration for three WRTC nodes operating in HA mode.

    Code Block
    #user nobody;
    worker_processes 1;

    #error_log logs/error.log;
    #error_log logs/error.log notice;
    #error_log logs/error.log info;

    #pid logs/nginx.pid;

    events {
        worker_connections 1024;
    }

    http {
        include mime.types;
        default_type application/octet-stream;
        fastcgi_buffers 8 16k;
        fastcgi_buffer_size 32k;

        # Upstream pool for WebSocket traffic across the three WRTC nodes.
        upstream ws_rack {
            # ip_hash;
            server 10.54.48.11:9080;
            server 10.54.48.12:9080;
            server 10.54.48.152:9080;
        }

        # Upstream pools for HTTP/HTTPS traffic on each exposed port.
        upstream http_rack_443 {
            server 10.54.48.11:8088;
            server 10.54.48.12:8088;
            server 10.54.48.152:8088;
        }

        upstream http_rack_80 {
            server 10.54.48.11:80;
            server 10.54.48.12:80;
            server 10.54.48.152:80;
        }

        upstream http_rack_8081 {
            server 10.54.48.11:8081;
            server 10.54.48.12:8081;
            server 10.54.48.152:8081;
        }

        # Secure WebSocket (WSS) front end.
        server {
            listen 9080 ssl;
            server_name 10.54.48.57;
            large_client_header_buffers 8 32k;

            ssl on;
            ssl_certificate /opt/nginx/ssl/server.crt;
            ssl_certificate_key /opt/nginx/ssl/server.key;
            ssl_session_timeout 5m;
            ssl_protocols SSLv2 SSLv3 TLSv1;
            ssl_ciphers HIGH:!aNULL:!MD5;
            ssl_prefer_server_ciphers on;
            #ssl_verify_client off;

            location / {
                #root html;
                #index index.html index.htm;
                proxy_pass http://ws_rack;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header Host $host;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

                # WebSocket support
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection "upgrade";
                proxy_set_header Host $host;
                proxy_read_timeout 86400;
                proxy_redirect off;
                proxy_buffers 8 32k;
                proxy_buffer_size 64k;

                # proxy_ssl_session_reuse off;
                # proxy_set_header X_FORWARDED_PROTO https;
            }
        }

        # HTTPS front end.
        server {
            listen 443 ssl;
            server_name 10.54.48.57;
            large_client_header_buffers 8 32k;
            ssl on;
            ssl_certificate /opt/nginx/ssl/server.crt;
            ssl_certificate_key /opt/nginx/ssl/server.key;
            ssl_session_timeout 5m;
            ssl_protocols SSLv2 SSLv3 TLSv1;
            ssl_ciphers HIGH:!aNULL:!MD5;
            ssl_prefer_server_ciphers on;
            #ssl_verify_client off;

            tcp_nopush on;

            location / {
                #root html;
                index index.html index.htm;
                proxy_pass http://http_rack_443/;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header Host $host;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_buffers 8 16k;
                proxy_buffer_size 32k;
                fastcgi_buffers 8 16k;
                fastcgi_buffer_size 32k;
                proxy_read_timeout 300;
                proxy_connect_timeout 300;
                tcp_nopush on;
            }
        }

        # Plain HTTP front end.
        server {
            listen 80;
            server_name 10.54.48.57;
            large_client_header_buffers 8 32k;

            #ssl_verify_client off;
            tcp_nopush on;

            location / {
                #root html;
                index index.html index.htm;
                proxy_pass http://http_rack_80/;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header Host $host;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_buffers 8 16k;
                proxy_buffer_size 32k;
                fastcgi_buffers 8 16k;
                fastcgi_buffer_size 32k;
                proxy_read_timeout 300;
                proxy_connect_timeout 300;
                tcp_nopush on;
            }
        }

        # SSL front end on port 8081.
        server {
            listen 8081 ssl;
            server_name 10.54.48.57;
            large_client_header_buffers 8 32k;

            ssl on;
            ssl_certificate /opt/nginx/ssl/server.crt;
            ssl_certificate_key /opt/nginx/ssl/server.key;
            ssl_session_timeout 5m;
            ssl_protocols SSLv2 SSLv3 TLSv1;
            ssl_ciphers HIGH:!aNULL:!MD5;
            ssl_prefer_server_ciphers on;
            #ssl_verify_client off;
            tcp_nopush on;

            location / {
                #root html;
                index index.html index.htm;
                proxy_pass http://http_rack_8081/;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header Host $host;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_buffers 8 16k;
                proxy_buffer_size 32k;
                fastcgi_buffers 8 16k;
                fastcgi_buffer_size 32k;
                proxy_read_timeout 300;
                proxy_connect_timeout 300;
                tcp_nopush on;
            }
        }
    }
    
  5. With the above configuration in place, start Node 1 by running the following commands. Refer to Initializing WRTC Node for more information.

    Code Block
    cd /opt/sonus/wrtc 
    ./wrtcnodeinit start
  6. After Node 1 is registered with EMS, start the WRTC application by running the following commands.

    Code Block
     su - wrtc
    ./sonuswrtc start
  7. When Node 1 comes online in EMS, repeat Steps 5 and 6 for Node 2 and Node 3. All the nodes are now started and the application is running. Each node in the cluster holds the details of the other nodes. If any node goes down, the sessions served by the failed node are rehydrated to the other nodes in the cluster in a round-robin manner. A quick way to verify that the load balancer reaches all three front ends is sketched after this list.
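Once all three nodes are running, probing each load-balancer front end can confirm that traffic is being accepted. The following minimal Python sketch assumes the load-balancer address and ports from the sample configuration above; the root path "/" is an assumption for illustration, not a documented WRTC health endpoint.

Code Block
# Probe each load-balancer front end once and report the HTTP status.
# The address and ports come from the sample NGINX configuration above;
# the "/" path is an assumption, not a documented health endpoint.
import ssl
import urllib.request

LB_HOST = "10.54.48.57"
PORTS = [443, 80, 8081]

# The sample configuration uses a self-signed certificate, so skip
# certificate verification for this illustrative check only.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

for port in PORTS:
    scheme = "http" if port == 80 else "https"
    url = f"{scheme}://{LB_HOST}:{port}/"
    try:
        with urllib.request.urlopen(url, context=ctx, timeout=5) as resp:
            print(f"{url} -> HTTP {resp.status}")
    except OSError as exc:
        print(f"{url} -> unreachable ({exc})")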
