I wrote the following script, but I have an issue with memory consumption: pandas allocates more than 30 GB of RAM, while the combined size of the data files is only roughly 18 GB.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time
mean_wo = pd.DataFrame()
mean_w = pd.DataFrame()
std_w = pd.DataFrame()
std_wo = pd.DataFrame()
start_time = time.time()  # record the start time
data_files = ['2012.h5', '2013.h5', '2014.h5', '2015.h5', '2016.h5', '2008_2011.h5']
for data_file in data_files:
    print(data_file)
    df = pd.read_hdf(data_file)
    grouped = df.groupby('day')
    mean_wo_tmp = grouped['Significance_without_muons'].agg(['mean'])
    mean_w_tmp = grouped['Significance_with_muons'].agg(['mean'])
    std_wo_tmp = grouped['Significance_without_muons'].agg(['std'])
    std_w_tmp = grouped['Significance_with_muons'].agg(['std'])
    mean_wo = pd.concat([mean_wo, mean_wo_tmp])
    mean_w = pd.concat([mean_w, mean_w_tmp])
    std_w = pd.concat([std_w, std_w_tmp])
    std_wo = pd.concat([std_wo, std_wo_tmp])
    mean_wo.info()  # .info() prints directly and returns None
    mean_w.info()
    del df, grouped, mean_wo_tmp, mean_w_tmp, std_w_tmp, std_wo_tmp
std_wo = std_wo.reset_index()
std_w = std_w.reset_index()
mean_wo = mean_wo.reset_index()
mean_w = mean_w.reset_index()
# convert the 'day' field to datetime
std_wo['day'] = pd.to_datetime(std_wo['day'], format='%Y-%m-%d')
std_w['day'] = pd.to_datetime(std_w['day'], format='%Y-%m-%d')
mean_w['day'] = pd.to_datetime(mean_w['day'], format='%Y-%m-%d')
mean_wo['day'] = pd.to_datetime(mean_wo['day'], format='%Y-%m-%d')
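
For what it's worth, one direction I've been considering is to read only the columns I actually use, compute mean and std in a single groupby pass, and collect the per-file results in a list so there is just one concat at the end (each pd.concat inside the loop copies everything accumulated so far). This is only a sketch, and it assumes the HDF5 files were written in table format, since the columns argument of read_hdf requires a table store:

import pandas as pd

cols = ['day', 'Significance_without_muons', 'Significance_with_muons']
data_files = ['2012.h5', '2013.h5', '2014.h5', '2015.h5', '2016.h5', '2008_2011.h5']

parts = []
for data_file in data_files:
    # read only the three columns that are actually used
    # (needs the files to have been saved with format='table')
    df = pd.read_hdf(data_file, columns=cols)
    # mean and std for both significance columns in a single pass,
    # with flat column names via named aggregation
    parts.append(df.groupby('day').agg(
        mean_wo=('Significance_without_muons', 'mean'),
        std_wo=('Significance_without_muons', 'std'),
        mean_w=('Significance_with_muons', 'mean'),
        std_w=('Significance_with_muons', 'std'),
    ))
    del df

# concatenate once at the end instead of growing DataFrames in the loop
stats = pd.concat(parts).reset_index()
stats['day'] = pd.to_datetime(stats['day'], format='%Y-%m-%d')

If that is still too heavy, read_hdf also accepts chunksize= for table-format stores, so each file could be processed in pieces, though the per-day statistics would then have to be combined across chunks. I'm not sure this addresses why pandas needs 30 GB in the first place, though.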
Does anyone have an idea how to decrease the memory consumption?
Cheers,
