/[escript]/branches/subworld2/escriptcore/src/Utils.cpp
Revision 5504
Wed Mar 4 22:58:13 2015 UTC by jfenwick
File size: 19314 bytes
Log message: Again with a more up to date copy



/*****************************************************************************
*
* Copyright (c) 2003-2015 by University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014 by Centre for Geoscience Computing (GeoComp)
*
*****************************************************************************/

#define ESNEEDPYTHON
#include "esysUtils/first.h"

#include <cfloat>   // DBL_EPSILON, DBL_MAX
#include <cstdio>   // remove
#include <fstream>
#include <sstream>
#include <string.h>
#include <vector>

// added for saveCSV
#include <boost/python.hpp>
#include <boost/scoped_array.hpp>
#include "Data.h"

#include "Utils.h"
#include "DataVector.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef ESYS_MPI
#include <mpi.h>
#endif

#ifdef _WIN32
#include <WinSock2.h>
#else
#include <unistd.h>
#endif

namespace escript {

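// Returns the subversion revision this copy of escript was built from,
// or 0 if SVN_VERSION was not defined at build time.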
int getSvnVersion()
{
#ifdef SVN_VERSION
    return SVN_VERSION;
#else
    return 0;
#endif
}

/* This is probably not very robust, but it works on Savanna today and is useful for performance analysis */
int get_core_id() {
    int processor_num=-1;
#ifdef CORE_ID1
    FILE *fp;
    int i, count_spaces=0;
    char fname[100];
    char buf[1000];

    sprintf(fname, "/proc/%d/stat", getpid());
    fp = fopen(fname, "r");
    if (fp == NULL) return(-1);
    if (fgets(buf, 1000, fp) == NULL) { // the stat info is a single line
        fclose(fp);
        return(-1);
    }
    fclose(fp);

    // the processor number sits four space-separated fields from the end
    // of the line (kernel dependent, hence the caveat above)
    for (i=strlen(buf)-1; i>=0; i--) {
        if (buf[i] == ' ') count_spaces++;
        if (count_spaces == 4) break;
    }
    processor_num = atoi(&buf[i+1]);
#endif
    return(processor_num);
}

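// Reports, for every OpenMP thread on every MPI rank, the MPI rank/size,
// OpenMP thread number/count, host name and core id. Handy for checking
// process and thread placement on a cluster.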
void printParallelThreadCnt()
{
    int mpi_iam=0, mpi_num=1;
    char hname[64];

#ifdef HAVE_GETHOSTNAME
    gethostname(hname, 64);
    hname[63] = '\0';
#else
    strcpy(hname, "unknown host");
#endif

#ifdef ESYS_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif

#pragma omp parallel
    {
        int omp_iam=0, omp_num=1;
#ifdef _OPENMP
        omp_iam = omp_get_thread_num(); /* Call in a parallel region */
        omp_num = omp_get_num_threads();
#endif
#pragma omp critical (printthrdcount)
        printf("printParallelThreadCounts: MPI=%03d/%03d OpenMP=%03d/%03d running on %s core %d\n",
               mpi_iam, mpi_num, omp_iam, omp_num, hname, get_core_id());
    }
}

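// Sets the number of OpenMP threads to use; a no-op in non-OpenMP builds.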
void setNumberOfThreads(const int num_threads)
{
#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif
}

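// Returns the maximum number of OpenMP threads, or 1 in non-OpenMP builds.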
int getNumberOfThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return 1;
#endif
}

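// Returns the number of ranks in MPI_COMM_WORLD, or 1 in non-MPI builds.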
ESCRIPT_DLL_API int getMPISizeWorld() {
    int mpi_num = 1;
#ifdef ESYS_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_num);
#endif
    return mpi_num;
}

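// Returns this process' rank in MPI_COMM_WORLD, or 0 in non-MPI builds.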
ESCRIPT_DLL_API int getMPIRankWorld() {
    int mpi_iam = 0;
#ifdef ESYS_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_iam);
#endif
    return mpi_iam;
}

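// Returns the maximum of val across all ranks in MPI_COMM_WORLD
// (or val itself in non-MPI builds).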
ESCRIPT_DLL_API int getMPIWorldMax(const int val) {
#ifdef ESYS_MPI
    int val2 = val;
    int out = val;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

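// Returns the sum of val across all ranks in MPI_COMM_WORLD
// (or val itself in non-MPI builds).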
ESCRIPT_DLL_API int getMPIWorldSum(const int val) {
#ifdef ESYS_MPI
    int val2 = val;
    int out = 0;
    MPI_Allreduce( &val2, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
#else
    int out = val;
#endif
    return out;
}

#define CHILD_FAIL 2
#define CHILD_COMPLETE 4

#ifndef _WIN32
#ifdef ESYS_MPI
#include <sys/socket.h>
#include <sys/select.h>
#include <errno.h>
#include <arpa/inet.h>
#include <climits>  // UINT_MAX
#include <cstdlib>  // rand_r
#include <ctime>    // time
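
// Sets up the listening side of the handshake used by runMPIProgram() below.
// On rank 0: creates a TCP socket bound to an ephemeral port on the loopback
// interface, reports the chosen port and a random key (a weak shared secret)
// through the out-parameters, and returns the listening descriptor, or -1 on
// failure. All other ranks return 0 immediately.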
int prepareSocket(unsigned short *port, int *key) {
    if (getMPIRankWorld() != 0)
        return 0;
    int sfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sfd < 0) {
        perror("socket creation failure");
        return -1;
    }
    int opt = 1;
    if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(int)) < 0) {
        perror("socket option setting failure");
        close(sfd);
        return -1;
    }

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr)); // zero sin_zero and any padding
    addr.sin_family = AF_INET;
    addr.sin_port = htons(0); // let the kernel pick an ephemeral port
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    if (bind(sfd, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
        perror("bind failure");
        close(sfd);
        return -1;
    }

    if (listen(sfd, SOMAXCONN) < 0) {
        perror("listen failure");
        close(sfd);
        return -1;
    }

    struct sockaddr actual;
    unsigned int size = sizeof(actual);
    if (getsockname(sfd, &actual, &size) < 0) {
        perror("failed when determining bound port number");
        close(sfd);
        return -1;
    }

    // if size > sizeof(actual), some info was truncated, but certainly not the port
    *port = ntohs(((struct sockaddr_in *) &actual)->sin_port);

    unsigned int seed = time(NULL) % UINT_MAX;
    *key = rand_r(&seed);
    return sfd;
}

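// Polls the readable descriptors for a connection that has sent the expected
// key. Connections presenting the wrong key are removed from the master set
// and closed. Returns CHILD_COMPLETE once a correctly keyed connection has
// been drained to end-of-stream, CHILD_FAIL on a receive error, or 0 if no
// verdict was reached yet.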
int check_data(unsigned int max, fd_set *readable, fd_set *all, int key, int sfd) {
    for (int i = 0; i <= (int)max; i++) {
        if (i == sfd)
            continue;
        if (FD_ISSET(i, readable)) {
            int provided = 0;
            if (recv(i, &provided, sizeof(int), MSG_WAITALL) == sizeof(int)
                    && provided == key) {
                char deadspace[1024];
                // drain the connection until the child closes it
                while ((provided = recv(i, deadspace, 1024, 0))) {
                    if (provided == -1) {
                        if (errno == EAGAIN || errno == EWOULDBLOCK) {
                            continue;
                        } else {
                            perror("connection failure");
                            return CHILD_FAIL;
                        }
                    }
                }
                return CHILD_COMPLETE;
            } else {
                // wrong key (or short read): drop the connection
                FD_CLR(i, all);
                close(i);
            }
        }
    }
    return 0;
}

void close_all(unsigned int maxfd, fd_set *all) {
    for (int i = 0; i <= (int)maxfd; i++) {
        if (FD_ISSET(i, all))
            close(i);
    }
}
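
// On rank 0, waits (with a 10 second inactivity timeout) for the spawned
// child to connect back on the socket created by prepareSocket(), present
// the key and run to completion. Returns 0 on success, -1 otherwise.
// Other ranks return 0 immediately.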
int waitForCompletion(int sfd, int key) {
    if (getMPIRankWorld() != 0)
        return 0;
    const int timeout = 10; // max of 10 seconds with no communication

    fd_set all; // master set of live descriptors
    FD_ZERO(&all);
    FD_SET(sfd, &all);
    time_t last_good_time = time(NULL);
    unsigned int maxfd = sfd;
    while (time(NULL) - last_good_time < timeout) {
        struct timeval timer = {1,0}; // 1 sec, 0 usec
        fd_set readable = all; // select() overwrites the set, so work on a copy
        int count = select(maxfd + 1, &readable, NULL, NULL, &timer);
        if (count == -1) { // error
            if (errno == EINTR) {
                continue; // just a signal, continue as we were
            } else {
                perror("socket operation error");
                close_all(maxfd, &all);
                return -1;
            }
        } else if (FD_ISSET(sfd, &readable)) { // new connection
            int connection = accept(sfd, NULL, NULL);
            if (connection >= 0) { // guard against accept failure
                if ((unsigned int)connection > maxfd)
                    maxfd = connection;
                FD_SET(connection, &all);
                time(&last_good_time);
            }
            count--;
        }
        if (count > 0) { // something to read, either connection key or state
            int res = check_data(maxfd, &readable, &all, key, sfd);
            if (res == CHILD_FAIL) {
                close_all(maxfd, &all);
                return -1;
            } else if (res == CHILD_COMPLETE) {
                close_all(maxfd, &all);
                return 0;
            }
        }
    }
    close_all(maxfd, &all);
    fprintf(stderr, "Connection to child process timed out\n");
    return -1;
}
#endif //ESYS_MPI
#endif //not _WIN32

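// Runs an external program, given as a Python list of command line tokens.
// In MPI builds the program is launched via MPI_Comm_spawn, on POSIX systems
// indirectly through the escript-overlord wrapper, which reports completion
// back over the socket handshake above. In non-MPI builds the tokens are
// simply joined and handed to system().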
ESCRIPT_DLL_API int runMPIProgram(boost::python::list args) {
#ifdef ESYS_MPI
    MPI_Comm intercomm;
    MPI_Info info;
    int errors;
    int nargs = boost::python::extract<int>(args.attr("__len__")());
    std::string cmd = boost::python::extract<std::string>(args[0]);
#ifdef _WIN32
    char** c_args = new char*[nargs]; // nargs-1 arguments plus terminating NULL
    std::vector<std::string> cpp_args(nargs); // keeps the extracted strings alive
    char* c_cmd = const_cast<char*>(cmd.c_str());
    // skip command name in argument list
    for (int i=1; i<nargs; i++) {
        cpp_args[i-1]=boost::python::extract<std::string>(args[i]);
        c_args[i-1]=const_cast<char*>(cpp_args[i-1].c_str());
    }
    c_args[nargs-1]=NULL; // MPI_Comm_spawn expects a NULL-terminated argv
    MPI_Info_create(&info);
    MPI_Comm_spawn(c_cmd, c_args, 1, info, 0, MPI_COMM_WORLD, &intercomm, &errors);
    MPI_Info_free(&info);
    delete[] c_args;

    return errors;
#else //#ifdef _WIN32
    char** c_args = new char*[nargs+3]; // port, key, command, arguments, NULL
    std::vector<std::string> cpp_args(nargs+3); // allow for wrapper, port, and key
    char c_cmd[] = "escript-overlord";
    // skip command name in argument list; slots 0-2 are filled in below
    for (int i=1; i<nargs; i++) {
        cpp_args[i+2]=boost::python::extract<std::string>(args[i]);
        c_args[i+2]=const_cast<char*>(cpp_args[i+2].c_str());
    }
    unsigned short port = 0;
    int key = 0;
    int sock = prepareSocket(&port, &key);
    if (getMPIWorldSum(sock) < 0)
        return -1;
    c_args[nargs+2]=NULL;
    char portstr[20] = {'\0'}, keystr[20] = {'\0'};
    sprintf(portstr, "%d", port);
    sprintf(keystr, "%d", key);
    c_args[0] = portstr;
    c_args[1] = keystr;
    c_args[2] = const_cast<char*>(cmd.c_str());

    MPI_Info_create(&info);
    //force the gmsh process to run on this host as well for network comm
    char hostname[MPI_MAX_PROCESSOR_NAME];
    int temp = MPI_MAX_PROCESSOR_NAME;
    MPI_Get_processor_name(hostname, &temp);
    char hoststr[] = "host"; // non-const array avoids literal-conversion warnings
    MPI_Info_set(info, hoststr, hostname);
    MPI_Comm_spawn(c_cmd, c_args, 1, info, 0, MPI_COMM_WORLD, &intercomm, &errors);
    MPI_Info_free(&info);
    delete[] c_args;
    if (errors != MPI_SUCCESS)
        return errors;
    return getMPIWorldMax(waitForCompletion(sock, key));
#endif //#ifdef _WIN32/else
#else //#ifdef ESYS_MPI
    // without MPI just join the tokens and hand them to the shell
    std::string cmd;
    int nargs = boost::python::extract<int>(args.attr("__len__")());
    for (int i=0; i<nargs; i++) {
        cmd+=boost::python::extract<std::string>(args[i]);
        cmd+=" ";
    }
    return system(cmd.c_str());
#endif //#ifdef ESYS_MPI/else
}
#undef CHILD_COMPLETE
#undef CHILD_FAIL

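// Returns the machine epsilon for double precision arithmetic.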
ESCRIPT_DLL_API double getMachinePrecision() {
    return DBL_EPSILON;
}

// Returns the largest representable double precision value.
ESCRIPT_DLL_API double getMaxFloat() {
    return DBL_MAX;
}
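
// Performs a barrier over MPI_COMM_WORLD; throws if use of the world
// communicator is currently blocked. A no-op in non-MPI builds.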
ESCRIPT_DLL_API void MPIBarrierWorld() {
#ifdef ESYS_MPI
    if (!esysUtils::NoCOMM_WORLD::active())
    {
        MPI_Barrier(MPI_COMM_WORLD);
    }
    else
    {
        throw esysUtils::EsysException("Attempt to use MPI_COMM_WORLD while it is blocked.");
    }
#endif
}

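// Writes the supplied Data objects to a CSV file, one column per component.
// arg maps column names to Data objects; an optional scalar entry "mask"
// suppresses rows (or individual points, if the mask acts expanded) where
// the mask value is <= 0. All data must live on the same domain and are
// interpolated to a common FunctionSpace first. sep separates columns and
// csep joins a name to its component indices in the header row.
// A hypothetical call from Python (see the Python wrapper for the exact
// exposed signature) might look like:
//     saveDataCSV("out.csv", {"U": u, "mask": m}, ",", "_", False)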
ESCRIPT_DLL_API
void
saveDataCSV(const std::string& filename, boost::python::dict arg, const std::string& sep, const std::string& csep,
            bool append)
{
    using std::endl;
    boost::python::list keys=arg.keys();
    int numdata = boost::python::extract<int>(arg.attr("__len__")());
    bool hasmask=arg.has_key("mask");
    Data mask;
    if (hasmask)
    {
        mask=boost::python::extract<escript::Data>(arg["mask"]);
        keys.remove("mask");
        numdata--;
        if (mask.getDataPointRank()!=0)
        {
            throw DataException("saveDataCSVcpp: masks must be scalar.");
        }
    }
    if (numdata<1)
    {
        throw DataException("saveDataCSVcpp: no data to save specified.");
    }
    std::vector<int> step(numdata);
    std::vector<std::string> names(numdata);
    std::vector<Data> data(numdata);
    std::vector<const DataAbstract::ValueType::value_type*> samples(numdata);
    std::vector<int> offset(numdata);
    std::vector<int> fstypes(numdata); // FunctionSpace types for each data

    keys.sort(); // to get some predictable order to things

    // We need to interpret the samples correctly even if they are different types;
    // for this reason, we should iterate over samples
    for (int i=0;i<numdata;++i)
    {
        names[i]=boost::python::extract<std::string>(keys[i]);
        data[i]=boost::python::extract<escript::Data>(arg[keys[i]]);
        step[i]=(data[i].actsExpanded()?DataTypes::noValues(data[i].getDataPointShape()):0);
        fstypes[i]=data[i].getFunctionSpace().getTypeCode();
        if (i>0)
        {
            if (data[i].getDomain()!=data[i-1].getDomain())
            {
                throw DataException("saveDataCSVcpp: all data must be on the same domain.");
            }
        }
    }
    int bestfnspace=0;
    if (!data[0].getDomain()->commonFunctionSpace(fstypes, bestfnspace))
    {
        throw DataException("saveDataCSVcpp: FunctionSpaces of data are incompatible");
    }
    // now we interpolate all data to the same type
    FunctionSpace best(data[0].getDomain(),bestfnspace);
    for (int i=0;i<numdata;++i)
    {
        data[i]=data[i].interpolate(best);
    }
    int numsamples=data[0].getNumSamples(); // these must be the same for all data
    int dpps=data[0].getNumDataPointsPerSample();

    std::ostringstream os;

    bool first=true;

    // rank 0 writes the header row: one column label per component of each Data object
    if (data[0].getDomain()->getMPIRank()==0)
    {
        for (int i=0;i<numdata;++i)
        {
            const DataTypes::ShapeType& s=data[i].getDataPointShape();
            switch (data[i].getDataPointRank())
            {
                case 0:
                    if (!first)
                    {
                        os << sep;
                    }
                    else
                    {
                        first=false;
                    }
                    os << names[i];
                    break;
                case 1:
                    for (int j=0;j<s[0];++j)
                    {
                        if (!first)
                        {
                            os << sep;
                        }
                        else
                        {
                            first=false;
                        }
                        os << names[i] << csep << j;
                    }
                    break;
                case 2:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            if (!first)
                            {
                                os << sep;
                            }
                            else
                            {
                                first=false;
                            }
                            os << names[i] << csep << k << csep << j;
                        }
                    }
                    break;
                case 3:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            for (int l=0;l<s[2];++l)
                            {
                                if (!first)
                                {
                                    os << sep;
                                }
                                else
                                {
                                    first=false;
                                }
                                os << names[i] << csep << k << csep << j << csep << l;
                            }
                        }
                    }
                    break;
                case 4:
                    for (int j=0;j<s[0];++j)
                    {
                        for (int k=0;k<s[1];++k)
                        {
                            for (int l=0;l<s[2];++l)
                            {
                                for (int m=0;m<s[3];++m)
                                {
                                    if (!first)
                                    {
                                        os << sep;
                                    }
                                    else
                                    {
                                        first=false;
                                    }
                                    os << names[i] << csep << k << csep << j << csep << l << csep << m;
                                }
                            }
                        }
                    }
                    break;
                default:
                    throw DataException("saveDataCSV: Illegal rank");
            }
        }
        os << endl;
    }

    const double* masksample=0;

    bool expandedmask=false; // does the mask act expanded, i.e. is there a mask value for each point in the sample?
    bool wantrow=true; // do we output this row?
    if (hasmask)
    {
        if (mask.actsExpanded())
        {
            expandedmask=true;
        }
    }
    os.setf(std::ios_base::scientific, std::ios_base::floatfield);
    os.precision(15);

    // errors prior to this point will occur on all processes anyway,
    // so there is no need to explicitly notify other ranks
    int error=0;
    try
    {
        for (int i=0;i<numsamples;++i)
        {
            if (!best.ownSample(i))
            {
                continue;
            }
            wantrow=true;
            for (int d=0;d<numdata;++d)
            {
                samples[d]=data[d].getSampleDataRO(i);
            }
            if (hasmask)
            {
                masksample=mask.getSampleDataRO(i);
                if (!expandedmask) // mask controls whole sample
                {
                    if (masksample[0]<=0) // masks are scalar
                    {
                        wantrow=false;
                    }
                }
            }
            for (int j=0;j<dpps;++j)
            {
                // now we need to check if this point is masked off
                if (expandedmask)
                {
                    wantrow=(masksample[j]>0); // masks are scalar, so the relevant value is at [j]
                }
                if (wantrow)
                {
                    bool needsep=false;
                    for (int d=0;d<numdata;++d)
                    {
                        DataTypes::pointToStream(os, samples[d], data[d].getDataPointShape(), offset[d], needsep, sep);
                        needsep=true;
                        offset[d]+=step[d];
                    }
                    os << endl;
                }
            }
            for (int d=0;d<numdata;++d)
            {
                offset[d]=0;
            }
        }
    }
    catch (...)
    {
        error=1;
        if (data[0].getDomain()->getMPISize()==1) {
            throw;
        }
    }
#ifdef ESYS_MPI
    MPI_Comm com=data[0].getDomain()->getMPIComm();
    int rerror=0;
    MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
    error=rerror;
    if (error)
    {
        throw DataException("saveDataCSVcpp: error building output");
    }
#endif

    // at this point os will contain the text to be written
#ifndef ESYS_MPI
    (void) error;

    std::ofstream ofs;
    if (append)
    {
        ofs.open(filename.c_str(), std::ios_base::app);
    }
    else
    {
        ofs.open(filename.c_str());
    }
    if (!ofs.is_open())
    {
        throw DataException("saveDataCSVcpp: unable to open file for writing");
    }
    ofs << os.str();
    ofs.close();

#else
    // here we have MPI
    MPI_File mpi_fileHandle_p;
    MPI_Status mpi_status;
    MPI_Info mpi_info = MPI_INFO_NULL;
    boost::scoped_array<char> fname_p(new char[filename.size()+1]);
    strcpy(fname_p.get(), filename.c_str());

    int amode = MPI_MODE_CREATE|MPI_MODE_WRONLY|MPI_MODE_UNIQUE_OPEN;
    if (append)
    {
        amode |= MPI_MODE_APPEND;
    }
    else
    {
        if (data[0].getDomain()->getMPIRank()==0)
        {
            std::ifstream ifs(fname_p.get()); // if the file exists, remove it
            if (ifs.is_open())
            {
                ifs.close();
                if (remove(fname_p.get()))
                {
                    error=1;
                }
            }
        }
        data[0].getDomain()->MPIBarrier();
        int rerror=0;
        MPI_Allreduce( &error, &rerror, 1, MPI_INT, MPI_MAX, com );
        if (rerror!=0)
        {
            std::ostringstream oss;
            oss << "saveDataCSVcpp: File " << filename << " already exists and could not be removed in preparation for new output.";
            throw DataException(oss.str());
        }
    }
    int ierr;
    ierr = MPI_File_open(com, fname_p.get(), amode, mpi_info, &mpi_fileHandle_p);
    if (ierr != MPI_SUCCESS)
    {
        std::ostringstream oss;
        oss << "saveDataCSVcpp: File " << filename << " could not be opened for writing in parallel";
        // file is not open so we can throw
        throw DataException(oss.str());
    }
    else
    {
        ierr=MPI_File_set_view(mpi_fileHandle_p, MPI_DISPLACEMENT_CURRENT,
                               MPI_CHAR, MPI_CHAR, const_cast<char*>("native"), mpi_info);
        // here we are assuming that std::string holds the same type of char as MPI_CHAR
    }

    std::string contents=os.str();
    boost::scoped_array<char> buff(new char[contents.size()+1]);
    strcpy(buff.get(), contents.c_str());
    ierr=MPI_File_write_ordered(mpi_fileHandle_p, buff.get(), contents.size(), MPI_CHAR, &mpi_status);
    if (ierr != MPI_SUCCESS)
    {
        error=1;
    }

    if (MPI_File_close(&mpi_fileHandle_p) != MPI_SUCCESS)
    {
        error=1;
    }
    data[0].getDomain()->MPIBarrier();
    if (error) // any errors at this stage are from collective routines,
    {          // so there is no need to reduce_all
        throw DataException("saveDataCSVcpp: Error writing and closing file");
    }

#endif
}

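// Resolves a group of lazy Data objects together so that shared
// subexpressions are evaluated only once. Entries that are not lazy are
// skipped; anything that is not a Data object raises a DataException.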
void
resolveGroup(boost::python::object obj)
{
    int len=0;
    try
    {
        len=boost::python::extract<int>(obj.attr("__len__")());
    }
    catch(...)
    {
        PyErr_Clear(); // tell python the error isn't there anymore
        throw DataException("Error - resolveGroup expects a sequence object.");
    }
    std::vector<DataLazy*> dats;
    std::vector<Data*> dp;
    for (int i=0;i<len;++i)
    {
        Data* p=0;
        try
        {
            p=boost::python::extract<Data*>(obj[i]);
        }
        catch(...)
        {
            PyErr_Clear();
            throw DataException("Error - resolveGroup only accepts Data objects.");
        }
        if (p->isLazy())
        {
            dats.push_back(dynamic_cast<DataLazy*>(p->borrowData()));
            dp.push_back(p);
        }
    }
    if (!dats.empty())
    {
        dats[0]->resolveGroupWorker(dats);
    }
    // all the data will be identities now but it's still lazy;
    // convert it to ready
    for (int i=dp.size()-1;i>=0;--i)
    {
        dp[i]->resolve();
    }
}

} // end of namespace
