From: Shigeru Hanada Date: Mon, 7 Feb 2011 14:37:28 +0000 (+0900) Subject: Fix file_fdw to use stat() instead of lstat() to get size of the file. X-Git-Url: http://git.postgresql.org/gitweb/static/gitweb.js?a=commitdiff_plain;h=c12d8c059797d442b1e5557edc35d8cf6761e2be;p=users%2Fhanada%2Fpostgres.git Fix file_fdw to use stat() instead of lstat() to get size of the file. This is because lstat() returns the size of the symbolic link itself, not the size of the file it points to. --- diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 3f2f6928fa..97c23de852 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -510,7 +510,7 @@ static unsigned long estimate_costs(const char *filename, RelOptInfo *baserel, double *startup_cost, double *total_cost) { - struct stat stat; + struct stat stat_buf; BlockNumber pages; double run_cost = 0; double cpu_per_tuple; @@ -518,7 +518,7 @@ estimate_costs(const char *filename, RelOptInfo *baserel, elog(DEBUG3, "%s called", __FUNCTION__); /* get size of the file */ - if (lstat(filename, &stat) == -1) + if (stat(filename, &stat_buf) == -1) { ereport(ERROR, (errcode_for_file_access(), @@ -531,7 +531,7 @@ estimate_costs(const char *filename, RelOptInfo *baserel, * - DISK costs are estimated from file size. * - CPU costs are 10x of seq scan, for overhead of parsing records. */ - pages = stat.st_size / BLCKSZ + (stat.st_size % BLCKSZ > 0 ? 1 : 0); + pages = stat_buf.st_size / BLCKSZ + (stat_buf.st_size % BLCKSZ > 0 ? 1 : 0); run_cost += seq_page_cost * pages; *startup_cost += baserel->baserestrictcost.startup; @@ -539,6 +539,6 @@ estimate_costs(const char *filename, RelOptInfo *baserel, run_cost += cpu_per_tuple * 10 * baserel->tuples; *total_cost = *startup_cost + run_cost; - return stat.st_size; + return stat_buf.st_size; }