I have more than 3,400 CSV files, ranging in size from 10kb to 3mb. Each file has a generic name of the form stockticker-Ret.csv, where stockticker is a stock ticker such as AAPL, GOOG, YHOO, and so on, and each file holds that stock's returns for every minute of a given date. My SAS code first reads all the file names and stores the stock tickers in a SAS dataset. I then loop over each ticker to import the corresponding stockticker-Ret.csv file into a SAS dataset called want, run a few data steps on want, and append the result to a SAS dataset called global. As you can imagine, this process takes a very long time. Is there a way to improve my code below to make it faster?
/* Record in a SAS dataset all the CSV file names so the stock tickers can be extracted */
data yfiles;
keep filename;
length fref $8 filename $80;
rc = filename(fref, 'F:\data\');
if rc = 0 then do;
  did = dopen(fref);
  rc = filename(fref);
end;
else do;
  length msg $200.;
  msg = sysmsg();
  put msg=;
  did = .;
end;
if did <= 0 then putlog 'ERR' 'OR: Unable to open directory.';
dnum = dnum(did);
do i = 1 to dnum;
  filename = dread(did, i);
  fid = mopen(did, filename); /* if this entry is a file, output it */
  if fid > 0 then output;
end;
rc = dclose(did);
run;
/*store in yfiles all the stock tickers*/
data yfiles(drop=filename rename=(filename1=stock));
set yfiles;
filename1=tranwrd(filename,'-Ret.csv','');
run;
proc sql noprint;
select stock into :name separated by '*' from work.yfiles;
%let count2 = &sqlobs;
quit;
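Just to make the macro plumbing clear (ticker values here are hypothetical): if yfiles held only AAPL, GOOG and YHOO, the SELECT above would leave &name as AAPL*GOOG*YHOO and &count2 as 3, so that %scan(&name, 2, *) in the loop further down resolves to GOOG and that iteration imports F:\data\GOOG-Ret.csv. A quick check I sometimes run:
/* Hypothetical sanity check of the macro variables built above */
%put NOTE: &count2 tickers found, second one is %scan(&name, 2, *);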
*Create the template of the desired GLOBAL SAS dataset;
proc sql;
create table global
(stock char(8), time_gap num(5), avg_ret num(5));
quit;
proc sql;
insert into global
(stock, time_gap,avg_ret)
values('',0,0);
quit;
%macro y1();
%do i = 1 %to &count2;
%let j = %scan(&name,&i,*);
proc import out = want datafile="F:\data\&j-Ret.csv"
dbms=csv replace;
getnames = yes;
run;
data want;
set want;
/* .... [Here I do 5 data steps on the want dataset] .... */

/* Append want to the global SAS dataset that accumulates all the stock tickers */
data global;
set global want;
run;
%end;
%mend y1;
%y1()
As you can see, the global SAS dataset grows with every want dataset that I append to it.
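In case it helps to see the direction I have in mind: below is a rough, untested sketch of reading all the CSV files in a single DATA step with the INFILE FILEVAR= option instead of one PROC IMPORT call per file. The column names time and ret and their informats are only guesses at the CSV layout, since I have not shown the file contents here.

/* Sketch only: read every stockticker-Ret.csv listed in yfiles in one DATA step. */
/* The columns time/ret and their informats are assumptions about the CSV layout. */
data global_alt;
set yfiles;                                    /* one row per stock ticker */
length csvpath $200;
csvpath = cats('F:\data\', stock, '-Ret.csv');
infile csvin filevar=csvpath dlm=',' dsd firstobs=2 end=done truncover;
do while (not done);
  input time :time8. ret;                      /* guessed layout: time, return */
  output;
end;
drop csvpath;
run;

Would something along these lines be expected to run faster than the PROC IMPORT loop, or is there a better way?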