#python #linux: check for a user (and delete it if it exists)

import pwd

from fabric.api import sudo  # fabric 1.x


def check_user_and_delete(user):
    try:
        # getpwnam() raises KeyError if the user does not exist
        pwd.getpwnam(user)
        res = sudo("userdel {}".format(user))
        if res.return_code == 0:
            print "==> User {} deleted".format(user)
        else:
            print "==> User {} not deleted".format(user)
    except KeyError:
        pass
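A minimal sketch of how this could be wired into a fabric 1.x fabfile (the host and user names below are placeholders):

from fabric.api import env

env.hosts = ['web01.example.com']  # placeholder host


def cleanup():
    check_user_and_delete('olduser')  # hypothetical user name

Then run it with fab cleanup.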

I just love #python

def bits(n):
    """
    Generates the binary digits of n
    """
    res = ''
    while n:
        res += str(n & 1)
        n >>= 1
    return res[::-1]
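For positive integers this matches Python's built-in bin() output without its '0b' prefix; note that bits(0) returns an empty string:

print bits(10)     # '1010'
print bin(10)[2:]  # '1010'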

#Jenkins and #fabric script to execute commands remotely

tee /tmp/fabfile.py <<EOF
from fabric.api import *
import wget

# strip newlines, or fabric treats them as part of the host names
env.hosts     = [h.strip() for h in open('hostfile', 'r').readlines()]
env.user      = open('userfile', 'r').readline().strip()
env.password  = open('passwordfile', 'r').readline().strip()


def vm_run_cmd(cmd, priv=True):
    if priv is True:
        sudo(cmd)
    else:
        run(cmd)


def vm_exec_remote_script(uri, privt=False):
    temp_f = wget.download(uri)
    inf = open(temp_f)
    vm_run_cmd(inf.read().replace('\n', ' '), privt)
    inf.close()
EOF

tee /tmp/hostfile <<EOF
localhost
EOF

tee /tmp/userfile <<EOF
xxxxxx
EOF

tee /tmp/passwordfile <<EOF
xxxxxx
EOF

cd /tmp/
virtualenv venv
source venv/bin/activate
pip install "fabric<2" wget  # fabric.api needs fabric 1.x; wget is the Python wget package
# $sa and $cmdline are assumed to be Jenkins build parameters
fab vm_exec_remote_script:'https://raw.githubusercontent.com/Flukas88/CodeUtils/master/hello.bash',privt=$sa
fab vm_run_cmd:"$cmdline",priv=$sa
deactivate

#Bash and #Redis: how to drain a cache

The only caveat is that you need Redis >= 2.8.0, which introduced the SCAN command.

#!/bin/bash
# delete all keys from database DB on host HOST, port PORT, in batches of STEP keys

PGNAME=$(basename "$0")

if [[ $# -ne 3 ]]; then
    echo "usage: $PGNAME HOST PORT DB_NUMBER"
    exit 55
fi

HOST=$1
PORT=$2
DB=$3
CUR=0
STEP=10000
EXEC="/export/local/redis/bin/redis-cli"

while :
do
    # SCAN returns the next cursor on the first line, then a batch of keys
    $EXEC -p "$PORT" -h "$HOST" -n "$DB" scan "$CUR" COUNT "$STEP" > keybatch.txt
    CUR=$(head -1 keybatch.txt)
    echo "CUR at $CUR"
    # skip the cursor line and prefix every key with "del"
    tail -n +2 keybatch.txt | sed 's/^/del /' > del.txt
    $EXEC -p "$PORT" -h "$HOST" -n "$DB" < del.txt | wc -l
    sleep 1
    # a cursor of 0 means the scan is complete
    if [[ $CUR -eq 0 ]]; then
        exit 0
    fi
done
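Assuming the script is saved as drain_cache.sh (the name is arbitrary), a run against database 0 of a local Redis would look like:

$ ./drain_cache.sh localhost 6379 0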

A little #Ansible snippet to set up a Nagios client machine

- hosts: local
  tasks:
  - name: install the epel repo
    yum: name=http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm state=installed
  - name: install the latest version of nrpe
    yum: name=nagios-nrpe state=latest
  - name: install the latest version of nagios-common
    yum: name=nagios-common state=latest
  - name: install the latest version of nagios-plugins
    yum: name=nagios-plugins state=latest
  - name: install the latest version of nagios-plugins-all
    yum: name=nagios-plugins-all state=latest
  - name: config nrpe
    lineinfile: dest=/etc/nagios/nrpe.cfg regexp=^allowed_hosts=.*$ line=allowed_hosts=xx.xx.xx.xx
  - name: restart nrpe
    service: name=nrpe state=restarted
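Assuming the playbook is saved as nagios-client.yml (the name is arbitrary) and "local" is a group in your inventory file, you can apply it with:

$ ansible-playbook -i hosts nagios-client.yml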

Huge Pages

What are those pages, and why would I want them to be huge?
When a process uses some memory, the CPU marks that RAM as used by that process. For efficiency, the CPU allocates RAM in chunks of 4 kB (the default page size on many platforms). Those chunks are called pages, and they can be swapped to disk, etc.

Since process address spaces are virtual, the CPU and the operating system have to remember which page belongs to which process, and where it is stored.
Obviously, the more pages you have, the longer it takes to find where the memory is mapped. When a process uses 1 GB of memory, that's 262144 entries to look up (1 GB / 4 kB). If one page table entry consumes 8 bytes, that's 2 MB (262144 * 8) of page tables to walk.
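A quick back-of-the-envelope check of those numbers in plain Python, comparing 4 kB pages with 2 MB huge pages:

MEM = 1024 ** 3  # 1 GB of process memory
PTE = 8          # bytes per page table entry

small = MEM // (4 * 1024)      # 4 kB pages
huge = MEM // (2 * 1024 ** 2)  # 2 MB huge pages

print small, small * PTE  # 262144 entries, 2097152 bytes (2 MB) of PTEs
print huge, huge * PTE    # 512 entries, 4096 bytes (4 kB) of PTEs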

How can I enable them?
The size of the huge pages in Linux varies with the architecture you are on:

Architecture   Huge page size
i386           4K and 4M (2M in PAE mode)
ia64           4K, 8K, 64K, 256K, 1M, 4M, 16M, 256M
ppc64          4K and 16M

You need to take the number below as your base:

$ grep Hugepagesize /proc/meminfo
Hugepagesize:     2048 kB

and then calculate how many pages of that size you want to dedicate to the process (caveat: the rest of the system will no longer be able to use that memory!). For example, 512 pages of 2048 kB each reserve 1 GB:

# echo 512 > /proc/sys/vm/nr_hugepages

This makes the change at runtime (it will disappear at reboot).
To have a persistent change you have to use

# echo "vm.nr_hugepages=512" >> /etc/sysctl.conf

You can then check with

$ grep HugePages_Total /proc/meminfo
HugePages_Total:   512
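If you would rather compute the page count than do the math by hand, here is a tiny hypothetical helper (plain Python, defaulting to the 2048 kB size from above):

def nr_hugepages(target_mb, hugepagesize_kb=2048):
    """Pages needed to reserve target_mb megabytes."""
    return (target_mb * 1024) // hugepagesize_kb

print nr_hugepages(1024)  # 512 pages for 1 GB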

Now you have to configure, in the /etc/security/limits.conf file, which user will be allowed to lock the huge pages (the user owning the process of the app you want to use them with).

userid          soft    memlock        PGSIZE*NUMBER
userid          hard    memlock        PGSIZE*NUMBER
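memlock is expressed in kB, so with the numbers from above (512 pages of 2048 kB each) that works out to 1048576:

userid          soft    memlock        1048576
userid          hard    memlock        1048576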

How can I get statistics on the huge pages?
I wrote this simple bash script which, given a process name, tells you whether that process is using huge pages.

#!/bin/bash

PROCESS=$1

if (($# < 1))
then
    echo "$(basename "$0"): process to verify is needed"
    exit 255
fi

for pid in $(ps -ef | grep "$PROCESS" | grep -v grep | awk '{print $2}')
do
    # a process using huge pages has "huge" entries in its numa_maps
    if grep -q huge "/proc/$pid/numa_maps" 2>/dev/null
    then
        echo -e "\e[0;32m[YES] Pid $pid is using hugepages\e[0m"
    else
        echo -e "\e[0;91m[NO] Pid $pid is not using hugepages\e[0m"
    fi
done
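Assuming the script was saved as hugecheck.sh (the name is arbitrary), run it with the process name as its only argument:

$ ./hugecheck.sh mysqld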