Fix zoneinfo parser #8

Open: wants to merge 9 commits into master
3 changes: 1 addition & 2 deletions Makefile
@@ -1,4 +1,4 @@
CC=gcc
CC?=gcc
CFLAGS=-I. -Wall -g
LDFLAGS=
OBJS=predict.o adaptivemmd.o
@@ -18,4 +18,3 @@ adaptivemmd: $(OBJS)

clean:
rm -f $(OBJS) adaptivemmd cscope.*

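A note on the Makefile hunk above: "?=" is GNU make's conditional assignment, so CC falls back to gcc only when the variable is not already defined, and a compiler supplied by the caller or the environment is respected instead of being forced back to gcc. A minimal sketch of the effect (the clang value is only an illustration, not part of this patch):

    CC ?= gcc
    # A caller can now pick a different compiler, for example:
    #   CC=clang make
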
50 changes: 27 additions & 23 deletions adaptivemmd.c
@@ -76,8 +76,8 @@

unsigned long min_wmark[MAX_NUMANODES], low_wmark[MAX_NUMANODES];
unsigned long high_wmark[MAX_NUMANODES], managed_pages[MAX_NUMANODES];
unsigned long total_free_pages, total_cache_pages, total_hugepages, base_psize;
long compaction_rate, reclaim_rate;
unsigned long total_free_pages, total_cache_pages, base_psize;
long compaction_rate, reclaim_rate, total_hugepages;
struct lsq_struct page_lsq[MAX_NUMANODES][MAX_ORDER];
int dry_run;
int debug_mode, verbose, del_lock = 0;
@@ -315,7 +315,7 @@ update_hugepages()
{
DIR *dp;
struct dirent *ep;
unsigned long newhpages = 0;
long newhpages = 0;
int rc = -1;

dp = opendir(HUGEPAGESINFO);
@@ -343,7 +343,7 @@ update_hugepages()
if (newhpages) {
unsigned long tmp;

tmp = abs(newhpages - total_hugepages);
tmp = llabs(newhpages - total_hugepages);
/*
* If number of hugepages changes from 0 to a
* positive number, percentage calculation will
@@ -376,8 +376,9 @@ update_hugepages()
#define ZONE_LOW "low"
#define ZONE_HIGH "high"
#define ZONE_MNGD "managed"
#define ZONE_PGST "pagesets"
int
#define ZONE_PROT "protection:"

void
update_zone_watermarks()
{
FILE *fp = NULL;
@@ -387,13 +388,16 @@ update_zone_watermarks()

fp = fopen(ZONEINFO, "r");
if (!fp)
return 0;
goto out_free;

while ((fgets(line, len, fp) != NULL)) {
if (strncmp(line, "Node", 4) == 0) {
char node[FLDLEN], zone[FLDLEN], zone_name[FLDLEN];
int nid;
unsigned long min, low, high, managed;
unsigned long min = 0;
unsigned long low = 0;
unsigned long high = 0;
unsigned long managed = 0;

sscanf(line, "%s %d, %s %8s\n", node, &nid, zone, zone_name);
if ((current_node == -1) || (current_node != nid)) {
@@ -413,10 +417,6 @@
* Ignore pages in DMA zone for x86 and x86-64.
*/
if (!skip_dmazone || (strncmp("DMA", zone_name, FLDLEN) != 0)) {
/*
* We found the normal zone. Now look for
* line "pages free"
*/
if (fgets(line, len, fp) == NULL)
goto out;

@@ -436,7 +436,7 @@
high = val;
if (strncmp(name, ZONE_MNGD, sizeof(ZONE_MNGD)) == 0)
managed = val;
if (strncmp(name, ZONE_PGST, sizeof(ZONE_PGST)) == 0)
if (strncmp(name, ZONE_PROT, sizeof(ZONE_PROT)) == 0)
break;
}

@@ -449,9 +449,9 @@
}

out:
free(line);
fclose(fp);
return 0;
out_free:
free(line);
}

/*
@@ -511,14 +511,15 @@ no_pages_reclaimed()
FILE *fp = NULL;
size_t len = 100;
char *line = malloc(len);
unsigned long val, reclaimed;
unsigned long val = 0;
unsigned long reclaimed = 0;
char desc[100];

fp = fopen(VMSTAT, "r");
if (!fp)
return 0;
goto out;

total_cache_pages = reclaimed = 0;
total_cache_pages = 0;
while ((fgets(line, len, fp) != NULL)) {
sscanf(line, "%s %lu\n", desc, &val );
if (strcmp(desc, "pgsteal_kswapd") == 0)
@@ -533,8 +534,9 @@
total_cache_pages += val;
}

free(line);
fclose(fp);
out:
free(line);
return reclaimed;
}

@@ -756,7 +758,7 @@ rescale_watermarks(int scale_up)

log_info(1, "Adjusting watermarks. Current watermark scale factor = %s", scaled_wmark);
if (dry_run)
goto out;
return;

log_info(1, "New watermark scale factor = %ld", scaled_watermark);
sprintf(scaled_wmark, "%ld\n", scaled_watermark);
@@ -789,7 +791,7 @@ static int
check_permissions(void)
{
int fd;
char tmpstr[40];
char tmpstr[40] = {0};

/*
* Make sure running kernel supports watermark_scale_factor file
@@ -802,6 +804,7 @@ check_permissions(void)
/* Can we write to this file */
if (read(fd, tmpstr, sizeof(tmpstr)) < 0) {
log_err("Can not read "RESCALE_WMARK" (%s)", strerror(errno));
close(fd);
return 0;
}
close(fd);
@@ -812,6 +815,7 @@

if (write(fd, tmpstr, strlen(tmpstr)) < 0) {
log_err("Can not write to "RESCALE_WMARK" (%s)", strerror(errno));
close(fd);
return 0;
}
close(fd);
@@ -1233,7 +1237,7 @@ parse_config()
* instead to doing it through adaptivemmd
*/
if (val > MAX_NEGDENTRY)
log_err("Bad value for negative dentry cap = %d (>%d). Proceeding with default of %d", val, MAX_NEGDENTRY, neg_dentry_pct);
log_err("Bad value for negative dentry cap = %ld (>%d). Proceeding with default of %d", val, MAX_NEGDENTRY, neg_dentry_pct);
else if (val < 1)
neg_dentry_pct = 1;
else
@@ -1415,7 +1419,7 @@ main(int argc, char **argv)
*/
base_psize = getpagesize()/1024;

pr_info("adaptivemmd "VERSION" started (verbose=%d, aggressiveness=%d, maxgap=%d)", verbose, aggressiveness, maxgap);
pr_info("adaptivemmd "VERSION" started (verbose=%d, aggressiveness=%d, maxgap=%lu)", verbose, aggressiveness, maxgap);

one_time_initializations();

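For readers following the error-path changes in update_zone_watermarks() and no_pages_reclaimed(): the hunks above replace early returns that leaked the malloc'd line buffer (and, in check_permissions(), an open file descriptor) with exit labels that release everything that was acquired. A minimal sketch of that cleanup pattern; the function name and path here are illustrative, not taken from the source:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: print one line from a file, releasing resources on every exit path. */
    static void print_first_line(const char *path)
    {
            size_t len = 100;
            char *line = malloc(len);
            FILE *fp;

            if (!line)
                    return;
            fp = fopen(path, "r");
            if (!fp)
                    goto out_free;  /* nothing to close yet, but the buffer still needs freeing */
            if (fgets(line, len, fp) != NULL)
                    printf("%s", line);
            fclose(fp);
    out_free:
            free(line);
    }

    int main(void)
    {
            print_first_line("/proc/vmstat");
            return 0;
    }
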
10 changes: 5 additions & 5 deletions predict.c
@@ -220,11 +220,11 @@ predict(struct frag_info *frag_vec, struct lsq_struct *lsq,
if (frag_vec[0].free_pages <= high_wmark) {
retval |= MEMPREDICT_RECLAIM;
log_info(2, "Reclamation recommended due to free pages being below high watermark");
log_info(2, "Consumption rate on node %d=%ld pages/msec, reclaim rate is %ld pages/msec, Free pages=%ld, low wmark=%ld, high wmark=%ld", nid, abs(m[0]), reclaim_rate, frag_vec[0].free_pages, low_wmark, high_wmark);
log_info(2, "Consumption rate on node %d=%lld pages/msec, reclaim rate is %ld pages/msec, Free pages=%lld, low wmark=%lu, high wmark=%lu", nid, llabs(m[0]), reclaim_rate, frag_vec[0].free_pages, low_wmark, high_wmark);
}
else {
time_taken = (frag_vec[0].free_pages - high_wmark)
/ abs(m[0]);
/ llabs(m[0]);

/*
* Time to reclaim frag_vec[0].free_pages - high_wmark
@@ -244,7 +244,7 @@
*/
if (time_taken <= (3*time_to_catchup)) {
log_info(3, "Reclamation recommended due to high memory consumption rate");
log_info(3, "Consumption rate on node %d=%ld pages/msec, reclaim rate is %ld pages/msec, Free pages=%ld, low wmark=%ld, high wmark=%ld", nid, abs(m[0]), reclaim_rate, frag_vec[0].free_pages, low_wmark, high_wmark);
log_info(3, "Consumption rate on node %d=%lld pages/msec, reclaim rate is %ld pages/msec, Free pages=%lld, low wmark=%lu, high wmark=%lu", nid, llabs(m[0]), reclaim_rate, frag_vec[0].free_pages, low_wmark, high_wmark);
log_info(3, "Time to below high watermark= %ld msec, time to catch up=%ld msec", time_taken, time_to_catchup);
retval |= MEMPREDICT_RECLAIM;
}
@@ -320,7 +320,7 @@ predict(struct frag_info *frag_vec, struct lsq_struct *lsq,
if (higher_order_pages < (m[order] * x_cross)) {
log_info(2, "Compaction recommended on node %d. Running out of order %d pages", nid, order);
if (order < (MAX_ORDER -1))
log_info(3, "No. of free order %d pages = %ld base pages, consumption rate=%ld pages/msec", order, (frag_vec[order+1].free_pages - frag_vec[order].free_pages), m[order]);
log_info(3, "No. of free order %d pages = %lld base pages, consumption rate=%lld pages/msec", order, (frag_vec[order+1].free_pages - frag_vec[order].free_pages), m[order]);
log_info(3, "Current compaction rate=%ld pages/msec", compaction_rate);
retval |= MEMPREDICT_COMPACT;
break;
@@ -351,7 +351,7 @@ predict(struct frag_info *frag_vec, struct lsq_struct *lsq,
if (time_taken >= time_to_catchup) {
log_info(3, "Compaction recommended on node %d. Order %d pages consumption rate is high", nid, order);
if (order < (MAX_ORDER -1))
log_info(3, "No. of free order %d pages = %ld base pages, consumption rate=%ld pages/msec", order, (frag_vec[order+1].free_pages - frag_vec[order].free_pages), m[order]);
log_info(3, "No. of free order %d pages = %lld base pages, consumption rate=%lld pages/msec", order, (frag_vec[order+1].free_pages - frag_vec[order].free_pages), m[order]);
log_info(3, "Current compaction rate=%ld pages/msec, Exhaustion in %ld msec", compaction_rate, time_taken);
retval |= MEMPREDICT_COMPACT;
break;
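
On the predict.c changes above: abs() takes and returns int, so passing it the slope and free-page values that the new %lld specifiers treat as long long would silently narrow them, and %ld would then mismatch what is actually printed. llabs() from stdlib.h keeps the full width. A small self-contained illustration with a made-up number rather than values from the daemon:

    #include <stdio.h>
    #include <stdlib.h>     /* llabs() */

    int main(void)
    {
            long long rate = -5000000000LL; /* does not fit in a 32-bit int */

            /* abs(rate) would narrow the argument to int; llabs() keeps long long,
             * and %lld matches the type of the result. */
            printf("consumption rate = %lld pages/msec\n", llabs(rate));
            return 0;
    }
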
2 changes: 1 addition & 1 deletion predict.h
@@ -81,7 +81,7 @@ unsigned long predict(struct frag_info *, struct lsq_struct *,
/* Use pr_info to log info irrespective of verbosity level */
#define pr_info(...) log_msg(LOG_INFO, __VA_ARGS__)

extern void log_msg(int level, char *fmt, ...);
extern void log_msg(int level, char *fmt, ...) __attribute__((format(printf, 2, 3)));

#ifdef __cplusplus
}
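
The predict.h change is what lets the compiler police the format strings fixed above: the format(printf, 2, 3) attribute (supported by GCC and Clang) declares that log_msg's second argument is a printf-style format and that the variadic arguments start at position 3, so -Wall/-Wformat can flag mismatched specifiers such as printing a long long with %ld. A short sketch of the effect; the example function and variable are hypothetical:

    /* Argument 2 is the printf-style format, variadic arguments start at 3. */
    extern void log_msg(int level, char *fmt, ...)
            __attribute__((format(printf, 2, 3)));

    void example(long long free_pages)
    {
            /* Correct: %lld matches long long. Had this said %ld, GCC/Clang with
             * -Wall/-Wformat would now emit a format warning at compile time. */
            log_msg(1, "Free pages=%lld", free_pages);
    }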