Changeset 1623


Timestamp: 08/10/09 16:28:58
Author: seroussi
Message: removed parallel parts of ice
Location: issm/trunk/src/m/solutions/ice
Files: 26 edited

  • issm/trunk/src/m/solutions/ice/BuildGridSets.m

    r1 r1623  
    1515%      BuildGridSets(grids,constraints)
    1616
    17 global cluster
    1817global gridset
    1918
     
    5655        if isempty(grid),continue;end;
    5756
    58         if ~cluster,
    59                 %if dof 1 is spc'ed, put it in the sset, otherwise, in the fset
    60                 if ~isempty(findstr(grid.gridset,'1')), sset(grid.doflist(1))=1; else fset(grid.doflist(1))=1; end
    61                 if ~isempty(findstr(grid.gridset,'2')), sset(grid.doflist(2))=1; else fset(grid.doflist(2))=1; end
    62                 if ~isempty(findstr(grid.gridset,'3')), sset(grid.doflist(3))=1; else fset(grid.doflist(3))=1; end
    63                 if ~isempty(findstr(grid.gridset,'4')), sset(grid.doflist(4))=1; else fset(grid.doflist(4))=1; end
    64                 if ~isempty(findstr(grid.gridset,'5')), sset(grid.doflist(5))=1; else fset(grid.doflist(5))=1; end
    65                 if ~isempty(findstr(grid.gridset,'6')), sset(grid.doflist(6))=1; else fset(grid.doflist(6))=1; end
    66         else
    67                 %same thing, except that if the grid is a border grid, we only carry out the inclusion on lab 1
    68                 if grid.border==1,
    69                         if labindex==1,
    70                                 if ~isempty(findstr(grid.gridset,'1')), sset(grid.doflist(1))=1; else fset(grid.doflist(1))=1; end
    71                                 if ~isempty(findstr(grid.gridset,'2')), sset(grid.doflist(2))=1; else fset(grid.doflist(2))=1; end
    72                                 if ~isempty(findstr(grid.gridset,'3')), sset(grid.doflist(3))=1; else fset(grid.doflist(3))=1; end
    73                                 if ~isempty(findstr(grid.gridset,'4')), sset(grid.doflist(4))=1; else fset(grid.doflist(4))=1; end
    74                                 if ~isempty(findstr(grid.gridset,'5')), sset(grid.doflist(5))=1; else fset(grid.doflist(5))=1; end
    75                                 if ~isempty(findstr(grid.gridset,'6')), sset(grid.doflist(6))=1; else fset(grid.doflist(6))=1; end
    76                         end
    77                 else
    78                         if ~isempty(findstr(grid.gridset,'1')), sset(grid.doflist(1))=1; else fset(grid.doflist(1))=1; end
    79                         if ~isempty(findstr(grid.gridset,'2')), sset(grid.doflist(2))=1; else fset(grid.doflist(2))=1; end
    80                         if ~isempty(findstr(grid.gridset,'3')), sset(grid.doflist(3))=1; else fset(grid.doflist(3))=1; end
    81                         if ~isempty(findstr(grid.gridset,'4')), sset(grid.doflist(4))=1; else fset(grid.doflist(4))=1; end
    82                         if ~isempty(findstr(grid.gridset,'5')), sset(grid.doflist(5))=1; else fset(grid.doflist(5))=1; end
    83                         if ~isempty(findstr(grid.gridset,'6')), sset(grid.doflist(6))=1; else fset(grid.doflist(6))=1; end
    84                 end
    85         end
    86 end
    87 
    88 
    89 if cluster,
    90         error('Not cool, broke it!');
    91         sset=gplus(sset);
    92         fset=gplus(fset);
     57        %if dof 1 is spc'ed, put it in the sset, otherwise, in the fset
     58        if ~isempty(findstr(grid.gridset,'1')), sset(grid.doflist(1))=1; else fset(grid.doflist(1))=1; end
     59        if ~isempty(findstr(grid.gridset,'2')), sset(grid.doflist(2))=1; else fset(grid.doflist(2))=1; end
     60        if ~isempty(findstr(grid.gridset,'3')), sset(grid.doflist(3))=1; else fset(grid.doflist(3))=1; end
     61        if ~isempty(findstr(grid.gridset,'4')), sset(grid.doflist(4))=1; else fset(grid.doflist(4))=1; end
     62        if ~isempty(findstr(grid.gridset,'5')), sset(grid.doflist(5))=1; else fset(grid.doflist(5))=1; end
     63        if ~isempty(findstr(grid.gridset,'6')), sset(grid.doflist(6))=1; else fset(grid.doflist(6))=1; end
    9364end
    9465
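With the global cluster flag and the whole parallel branch gone, BuildGridSets routes every grid's six degrees of freedom into either the s-set (single point constrained) or the f-set (free) with no labindex/border special case. A minimal sketch of the surviving logic, assuming the enclosing loop and the grid=grids(i).grid extraction from the unchanged part of the file; the six per-dof tests of the source are collapsed into a loop here for brevity:

        for i=1:length(grids),
                grid=grids(i).grid;               %assumed extraction, as in the unchanged lines above
                if isempty(grid),continue;end;
                %if dof n is spc'ed (its number appears in the gridset string), put it in the sset, otherwise in the fset
                for dof=1:6,
                        if ~isempty(findstr(grid.gridset,num2str(dof))),
                                sset(grid.doflist(dof))=1;
                        else
                                fset(grid.doflist(dof))=1;
                        end
                end
        end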
  • issm/trunk/src/m/solutions/ice/DeviatoricStressCompute.m

    r51 r1623  
    66
    77%global variables
    8 global cluster gridset
     8global gridset
    99
    1010%recover fem model fields
  • issm/trunk/src/m/solutions/ice/Dof.m

    r1 r1623  
    1212%      grids=Dof(grids)
    1313
    14 global cluster
    15 
    1614dofcount=length(grids)*6;
    1715
    18 if ~cluster,
    19         dof=1;
    20         for i=1:length(grids),
    21                 grids(i).grid.doflist=dof:1:(dof+5);
    22                 dof=dof+6;
    23         end
    24 else
    25         borderdof=1;
    26         dof=1;
    27         for i=1:length(grids),
    28                 if ~isempty(grids(i).grid),
    29                         if ~grids(i).grid.border,
    30                                 grids(i).grid.doflist=dof:1:(dof+5);
    31                                 dof=dof+6;
    32                         else
    33                                 grids(i).grid.doflist=borderdof:1:(borderdof+5);
    34                                 borderdof=borderdof+6;
    35                         end
    36                 end
    37         end
    38         localdofcount=dof-1;
    39         localborderdofcount=borderdof-1;
    40 
    41         %Now, offset the border dofs so that they are lumped at the end of the dof list.
    42         for i=1:length(grids),
    43                 if (~isempty(grids(i).grid) & grids(i).grid.border),
    44                         grids(i).grid.doflist=grids(i).grid.doflist+dofcount-localborderdofcount;
    45                 end
    46         end
    47 
    48         %Now, update the regular local grid dofs to account for other cpu grids.
    49         alldofcounts=gcat(localdofcount); %this array holds the dof count for all cpus.
    50        
    51         %Determine offset for all grid dofs
    52         dofoffset=sum(alldofcounts(1:labindex-1));
    53        
    54         %offset all grid dofs.
    55         for i=1:length(grids),
    56                 if (~isempty(grids(i).grid) & ~grids(i).grid.border),
    57                         grids(i).grid.doflist=grids(i).grid.doflist+dofoffset;
    58                 end
    59         end
     16dof=1;
     17for i=1:length(grids),
     18        grids(i).grid.doflist=dof:1:(dof+5);
     19        dof=dof+6;
    6020end
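With the border and per-lab offset bookkeeping removed, dof numbering in Dof.m is purely sequential: grid i owns dofs 6*(i-1)+1 through 6*i (grid 1 gets 1:6, grid 2 gets 7:12, and so on). The retained core, reproduced from the added lines as a standalone sketch:

        dofcount=length(grids)*6;   %6 degrees of freedom per grid

        %assign consecutive dof lists to each grid
        dof=1;
        for i=1:length(grids),
                grids(i).grid.doflist=dof:1:(dof+5);
                dof=dof+6;
        end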
  • issm/trunk/src/m/solutions/ice/ModelProcessor.m

    r327 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessor(md,solutiontype)
    1212
    13 global cluster
    1413if strcmpi(solutiontype,'diagnostic_horiz'),
    1514        [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorDiagnosticHoriz(md);
  • issm/trunk/src/m/solutions/ice/ModelProcessorDiagnosticHoriz.m

    r1299 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorDiagnosticHoriz(md)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514if strcmpi(md.type,'2d'),
     
    3019elements=struct('element',cell(numberofelements_horiz,1));
    3120materials=struct('material',cell(numberofelements_horiz+1,1));
    32 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    3321
    3422%Deal with 2d elements
    3523if strcmpi(md.type,'2d'),
    36         el3pos=find((md.elements_type(:,1)==MacAyealEnum()) & (element_partitioning==labindex));
     24        el3pos=find(md.elements_type(:,1)==MacAyealEnum);
    3725        if ~isempty(el3pos),
    3826                pos=[1:length(el3pos)]';
     
    6654        %3d elements
    6755        %First create penta elements (ie non hutter)
    68         el6pos=find((md.elements_type(:,1)==MacAyealEnum() | md.elements_type(:,1)==PattynEnum()) & (element_partitioning==labindex));
     56        el6pos=find(md.elements_type(:,1)==MacAyealEnum | md.elements_type(:,1)==PattynEnum);
    6957
    7058        if ~isempty(el6pos),
     
    120108materials(end)=SetStructureField(materials(end),'constants','thermal_exchange_velocity',md.thermal_exchange_velocity);
    121109
    122 if cluster,
    123         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    124         mygrids(md.elements(el3pos,:))=1;
    125         mygrids(md.elements(el6pos,:))=1;
    126 end
    127 
    128 if cluster,
    129         %Figure out which grids from the partitioning belong to different element partitions. We'll
    130         %call them 'border' grids.
    131         bordergrids=double(gplus(mygrids)>1);
    132 else
    133         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    134 end
    135 
    136110%Get the grids set up:
    137111grids=struct('grid',cell(md.numberofgrids,1));
     
    145119grids(pos)=SetStructureField(grids(pos),'grid','s',(md.z(pos)-md.bed(pos))./md.thickness(pos));
    146120grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    147 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    148121
    149122%spc degrees of freedom:
     
    190163for i=1:size(md.segmentonneumann_diag,1),
    191164
    192         if (element_partitioning(md.segmentonneumann_diag(i,end))~=labindex), %this load does not belong to this cpu element partition.
    193                 continue;
    194         end
    195 
    196165        if strcmpi(md.type,'3d'),
    197166                if md.elements_type(md.segmentonneumann_diag(i,end))==MacAyealEnum(),
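The same simplification recurs in every ModelProcessor* file below: the MeshPartition/labindex element selection collapses to a plain selection over all elements, and the border-grid bookkeeping (mygrids, bordergrids, the grid 'border' field) disappears. A minimal before/after sketch of that recurring pattern, using names from the source (pentaelem stands for whichever element template the particular file deals with):

        %removed (parallel): keep only the elements of this lab's partition
        %pos=find(element_partitioning==labindex);

        %retained (serial): every element belongs to the single partition
        pos=[1:md.numberofelements]';
        [elements(pos).element]=deal(pentaelem);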
  • issm/trunk/src/m/solutions/ice/ModelProcessorDiagnosticHutter.m

    r1024 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorDiagnosticHutter(md);
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514if strcmpi(md.type,'2d'),
     
    2918        elements=struct('element',cell(length(pos),1));
    3019        materials=struct('material',cell(length(pos)+1,1));
    31         mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    3220        [elements(count).element]=deal(singelem);
    3321
     
    5139        elements=struct('element',cell(length(pos),1));
    5240        materials=struct('material',cell(length(pos)+1,1));
    53         mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    5441        [elements(count).element]=deal(beamelem);
    5542
     
    7966materials(end)=SetStructureField(materials(end),'constants','meltingpoint',md.meltingpoint);
    8067
    81 if cluster,
    82         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    83         mygrids(md.elements(el3pos,:))=1;
    84         mygrids(md.elements(el6pos,:))=1;
    85 end
    86 
    87 if cluster,
    88         %Figure out which grids from the partitioning belong to different element partitions. We'll
    89         %call them 'border' grids.
    90         bordergrids=double(gplus(mygrids)>1);
    91 else
    92         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    93 end
    94 
    9568%Get the grids set up:
    9669grids=struct('grid',cell(md.numberofgrids,1));
     
    10477grids(pos)=SetStructureField(grids(pos),'grid','s',(md.z(pos)-md.bed(pos))./md.thickness(pos));
    10578grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    106 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    10779
    10880%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/ModelProcessorDiagnosticStokes.m

    r1299 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorDiagnosticHoriz(md)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514numberofstokeselements=size(find(md.elements_type(:,2)==StokesEnum()),1);
    2615elements=struct('element',cell(numberofstokeselements,1));
    2716materials=struct('material',cell(numberofstokeselements+1,1));
    28 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2917
    3018%3d elements
    31 %pos=find(element_partitioning==labindex);
    32 pos=find(md.elements_type(:,2)==StokesEnum());
     19pos=find(md.elements_type(:,2)==StokesEnum);
    3320stokesnewnumber=[1:numberofstokeselements]';
    3421[elements(stokesnewnumber).element]=deal(pentaelem);
     
    7562
    7663
    77 if cluster,
    78         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    79         mygrids(md.elements(pos,:))=1;
    80 end
    81 
    82 if cluster,
    83         %Figure out which grids from the partitioning belong to different element partitions. We'll
    84         %call them 'border' grids.
    85         bordergrids=double(gplus(mygrids)>1);
    86 else
    87         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    88 end
    89 
    9064%Get the grids set up:
    9165grids=struct('grid',cell(md.numberofgrids,1));
     
    9973grids(pos)=SetStructureField(grids(pos),'grid','s',(md.z(pos)-md.bed(pos))./md.thickness(pos));
    10074grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    101 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    10275
    10376%spc degrees of freedom:
     
    140113for i=1:length_segmentonneumann_diag_stokes,
    141114
    142         if (element_partitioning(segmentonneumann_diag_stokes(i,end))~=labindex), %this load does not belong to this cpu element partition.
    143                 continue;
    144         end
    145        
    146115        %build a quad ice front for the penta element
    147116        loads(i).load=icefront;
  • issm/trunk/src/m/solutions/ice/ModelProcessorDiagnosticVert.m

    r1040 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorDiagnosticVert(md);
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514elements=struct('element',cell(md.numberofelements,1));
    2615materials=struct('material',cell(md.numberofelements+1,1));
    27 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2816
    2917%3d elements
    30 pos=find(element_partitioning==labindex);
     18pos=[1:md.numberofelements]';
    3119[elements(pos).element]=deal(pentaelem);
    3220
     
    7260materials(end)=SetStructureField(materials(end),'constants','thermal_exchange_velocity',md.thermal_exchange_velocity);
    7361
    74 if cluster,
    75         %Figure out which grids from the partitioning belong to different element partitions. We'll
    76         %call them 'border' grids.
    77         bordergrids=double(gplus(mygrids)>1);
    78 else
    79         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    80 end
    81 
    8262%Get the grids set up:
    8363grids=struct('grid',cell(md.numberofgrids,1));
     
    9272grids(pos)=SetStructureField(grids(pos),'grid','surface',md.surface(pos));
    9373grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    94 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    9574
    9675%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/ModelProcessorMelting.m

    r1040 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorMelting(md,solutiontype)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514elements=struct('element',cell(md.numberofelements,1));
    2615materials=struct('material',cell(md.numberofelements+1,1));
    27 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2816
    2917%Build elements
     
    3220end
    3321
    34 pos=find(element_partitioning==labindex);
     22pos=[1:md.numberofelements]';
    3523[elements(pos).element]=deal(pentaelem);
    3624
     
    6553materials(end)=SetStructureField(materials(end),'constants','thermal_exchange_velocity',md.thermal_exchange_velocity);
    6654
    67 if cluster,
    68         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    69         mygrids(md.elements(el3pos,:))=1;
    70         mygrids(md.elements(el6pos,:))=1;
    71        
    72         %Figure out which grids from the partitioning belong to different element partitions. We'll
    73         %call them 'border' grids.
    74         bordergrids=double(gplus(mygrids)>1);
    75 else
    76         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    77 end
    78 
    7955%Get the grids set up:
    8056grids=struct('grid',cell(md.numberofgrids,1));
     
    8965grids(pos)=SetStructureField(grids(pos),'grid','surface',md.surface(pos));
    9066grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    91 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    9267
    9368%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/ModelProcessorPrognostic.m

    r1040 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorPrognostic(md)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514elements=struct('element',cell(md.numberofelements,1));
    2615materials=struct('material',cell(0,0));
    27 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2816
    2917%Build elements
    3018if strcmpi(md.type,'2d'),
    31         pos=find(element_partitioning==labindex);
     19        pos=[1:md.numberofelements]';
    3220        [elements(pos).element]=deal(triaelem);
    3321
     
    4230
    4331else
    44         pos=find(element_partitioning==labindex);
     32        pos=[1:md.numberofelements]';
    4533        [elements(pos).element]=deal(pentaelem);
    4634
     
    5846end
    5947
    60 if cluster,
    61         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    62         mygrids(md.elements(el3pos,:))=1;
    63         mygrids(md.elements(el6pos,:))=1;
    64        
    65         %Figure out which grids from the partitioning belong to different element partitions. We'll
    66         %call them 'border' grids.
    67         bordergrids=double(gplus(mygrids)>1);
    68 else
    69         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    70 end
    71 
    7248%Get the grids set up:
    7349grids=struct('grid',cell(md.numberofgrids,1));
     
    8258grids(pos)=SetStructureField(grids(pos),'grid','surface',md.surface(pos));
    8359grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    84 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    8560
    8661%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/ModelProcessorSlopeCompute.m

    r1040 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorSlopeCompute(md,solutiontype)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514elements=struct('element',cell(md.numberofelements,1));
    2615materials=struct('material',cell(0));
    27 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2816
    2917%Build elements
    3018if strcmpi(md.type,'2d'),
    31         pos=find(element_partitioning==labindex);
     19        pos=[1:md.numberofelements]';
    3220        [elements(pos).element]=deal(triaelem);
    3321
     
    3927
    4028else
    41         pos=find(element_partitioning==labindex);
     29        pos=[1:md.numberofelements]';
    4230        [elements(pos).element]=deal(pentaelem);
    4331
     
    5846end
    5947
    60 if cluster,
    61         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    62         mygrids(md.elements(el3pos,:))=1;
    63         mygrids(md.elements(el6pos,:))=1;
    64        
    65         %Figure out which grids from the partitioning belong to different element partitions. We'll
    66         %call them 'border' grids.
    67         bordergrids=double(gplus(mygrids)>1);
    68 else
    69         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    70 end
    71 
    7248%Get the grids set up:
    7349grids=struct('grid',cell(md.numberofgrids,1));
     
    8258grids(pos)=SetStructureField(grids(pos),'grid','surface',md.surface(pos));
    8359grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    84 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    8560
    8661%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/ModelProcessorThermal.m

    r1299 r1623  
    1111%      [elements,grids,loads,constraints,materials,part,tpart]=ModelProcessorThermal(md,solutiontype)
    1212
    13 global cluster
    14 
    15 if cluster,
    16         %We are running in parallel, we need to partition the elements
    17         element_partitioning=MeshPartition(md,numlabs);
    18 else
    19         %We are running in serial, all elements belong to the same partition.
    20         element_partitioning=ones(md.numberofelements,1);
    21         labindex=1; %older versions of matlab do not include the parallel toolbox labindex variable.
    22 end
    23 
    2413%Allocate grids and elements
    2514elements=struct('element',cell(md.numberofelements,1));
    2615materials=struct('material',cell(md.numberofelements+1,1));
    27 mygrids=zeros(md.numberofgrids,1); %this array determines grid partitioning.
    2816
    2917%Build 3d elements
    30 pos=find(element_partitioning==labindex);
     18pos=[1:md.numberofelements]';
    3119[elements(pos).element]=deal(pentaelem);
    3220
     
    5139%For penta elements where we want to implement MacAyeal's element, we need to collapse
    5240%the formulation into trias:
    53 el6pos=find((md.elements_type(:,1)==MacAyealEnum() | md.elements_type(:,1)==PattynEnum()) & (element_partitioning==labindex));
     41el6pos=find(md.elements_type(:,1)==MacAyealEnum | md.elements_type(:,1)==PattynEnum);
    5442pos2=find(ismember(el6pos,find(md.elements_type(:,1)==MacAyealEnum())));
    5543elements(pos2)=SetStructureField(elements(pos2),'element','collapse',ones(length(pos2),1));
     
    8068materials(end)=SetStructureField(materials(end),'constants','thermal_exchange_velocity',md.thermal_exchange_velocity);
    8169
    82 if cluster,
    83         %For elements, the corresponding grids belong to this cpu. Keep track of it.
    84         mygrids(md.elements(el3pos,:))=1;
    85         mygrids(md.elements(el6pos,:))=1;
    86 end
    87 
    88 if cluster,
    89         %Figure out which grids from the partitioning belong to different element partitions. We'll
    90         %call them 'border' grids.
    91         bordergrids=double(gplus(mygrids)>1);
    92 else
    93         bordergrids=zeros(md.numberofgrids,1); %no partitioning serially.
    94 end
    95 
    9670%Get the grids set up:
    9771grids=struct('grid',cell(md.numberofgrids,1));
     
    10680grids(pos)=SetStructureField(grids(pos),'grid','surface',md.surface(pos));
    10781grids(pos)=SetStructureField(grids(pos),'grid','onbed',md.gridonbed(pos));
    108 grids(pos)=SetStructureField(grids(pos),'grid','border',bordergrids(pos));
    10982
    11083%spc degrees of freedom:
  • issm/trunk/src/m/solutions/ice/PartitioningVector.m

    r1 r1623  
    99%      [part tpart]=PartitioningVector(md,grids)
    1010
    11 global cluster
    12 
    13 if ~cluster,
    14     part=1:1:md.numberofgrids;
    15     tpart=1:1:md.numberofgrids;
    16 else
    17     part=zeros(md.numberofgrids,1);
    18     %First deal with non border grids.
    19 
    20     %How many grids for each lab?
    21     count=0;
    22     for i=1:md.numberofgrids,
    23         if ((~isempty(grids(i).grid)) & (grids(i).grid.border==0)),
    24             count=count+1;
    25         end
    26     end
    27 
    28     %broacast
    29         labcounts=gcat(count);
    30 
    31         %create offset into partition vector
    32         offset=0;
    33         for i=1:(labindex-1),
    34                 offset=offset+labcounts(i);
    35         end
    36 
    37         %create partition vector
    38         for i=1:md.numberofgrids,
    39                 if ((~isempty(grids(i).grid)) & (grids(i).grid.border==0)),
    40                         offset=offset+1;
    41                         part(offset)=i;
    42                 end
    43         end
    44 
    45         part=gplus(part);
    46 
    47         %deal with border grids
    48         count=gplus(count);
    49         for i=1:md.numberofgrids,
    50                 if ((~isempty(grids(i).grid)) & (grids(i).grid.border==1)),
    51                         count=count+1;
    52                         part(count)=i;
    53                 end
    54         end
    55 
    56         %Build tpart:
    57         tpart=zeros(md.numberofgrids,1);
    58         for i=1:md.numberofgrids,
    59                 tpart(part(i))=i;
    60         end
    61 end
     11 part=1:1:md.numberofgrids;
     12 tpart=1:1:md.numberofgrids;
    6213
    6314end %end function
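Only the serial branch of PartitioningVector survives, so part and tpart are both the identity permutation over the grid numbering (part(i)==tpart(i)==i), and the gcat/gplus offset machinery for border grids is gone. The resulting body as a standalone sketch, with the function line taken from the source header comment:

        function [part,tpart]=PartitioningVector(md,grids)
        %serial case: part and tpart are both the identity permutation over the grids
        part=1:1:md.numberofgrids;
        tpart=1:1:md.numberofgrids;
        end %end function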
  • issm/trunk/src/m/solutions/ice/PressureElemCompute.m

    r51 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/SpcGrids.m

    r1 r1623  
    77%      [grids,y_g]=SpcGrids(grids,constraints)
    88
    9 global cluster
    10 
    119%initialize y_g, vector of constraint values in the g-set:
    1210y_g=sparse(getdofcount(grids),1);
    13 
    14 if cluster,
    15         y_g_border=sparse(getdofcount(grids),1);
    16 end
    1711
    1812%Go through constraints and add constraint on gridset list of corresponding grid
     
    2317                constraint_grid=grids(constraint.grid).grid;
    2418               
    25                 %On the cluster, grids are partitioned across cpus, but constraints are replicated across cpus.  So
    26                 %grids pointed to by certain constraints might not exist on the local cpu. In this case, skip the
    27                 %constraint operations.
    28                 if cluster & isempty(constraint_grid),
    29                         continue;
    30                 end
    31 
    3219                %for the constrained grid, add the correct degree of freedom in the gridset list
    3320                constraint_grid.gridset=[constraint_grid.gridset num2str(constraint.dof)]; %concatenate two strings
     
    3825                %plug value of constraint in corresponding constraint vector y_g
    3926                dof=constraint_grid.doflist(constraint.dof);
    40                 if ~cluster,
    41                         y_g(dof)=constraint.value;
    42                 else
    43                         %for cluster, we add to y_g only if grid does not belong to border (we don't want to add constraint.value several times for each cpu).
    44                         if constraint_grid.border~=1,
    45                                 y_g(dof)=constraint.value;
    46                         else
    47                                 y_g_border(dof)=constraint.value;
    48                         end
    49                 end
     27                y_g(dof)=constraint.value;
    5028        end
    5129end
    5230
    53 if cluster,
    54         %Add all the y_g from all the cpus
    55         y_g=gplus(y_g);
    56         %y_g_border is the same on all cpus, plug it into y_g
    57         y_g=y_g+y_g_border;
    58 end
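In SpcGrids.m, constraint values now go straight into y_g: there is no y_g_border accumulator, no border test, and no final gplus reduction across cpus. A sketch of the surviving inner step, assuming the enclosing loop over constraints from the unchanged context lines:

        %for the constrained grid, add the correct degree of freedom in the gridset list
        constraint_grid.gridset=[constraint_grid.gridset num2str(constraint.dof)];

        %plug value of constraint in corresponding constraint vector y_g
        dof=constraint_grid.doflist(constraint.dof);
        y_g(dof)=constraint.value;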
  • issm/trunk/src/m/solutions/ice/StrainRateCompute.m

    r630 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/StressBedCompute.m

    r51 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/StressCompute.m

    r630 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/StressSurfaceCompute.m

    r51 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/ViscousHeatingCompute.m

    r51 r1623  
    99
    1010%global variables
    11 global cluster gridset
     11global gridset
    1212
    1313%recover fem model fields
  • issm/trunk/src/m/solutions/ice/diagnostic_core_linear.m

    r329 r1623  
    1111
    1212%global variables
    13 global cluster gridset
     13global gridset
    1414
    1515if nargin==3,
     
    2929params=m.params;
    3030
    31 
    3231%recover parameters
    3332sparsity=params.sparsity;
     
    3635
    3736disp(sprintf('%s','   solving'));
    38 
    3937% Generate system matrices (stiffness and load)
    4038kflag=1; pflag=1;
     
    4341[K_gg,p_g]=SystemMatrices(elements,grids,loads,materials,kflag, pflag, sparsity,inputs,analysis_type);
    4442[K_gg,p_g]=PenaltySystemMatrices(grids,loads,materials,kflag, pflag, sparsity,inputs,analysis_type,K_gg,p_g);
    45 
    46 if cluster,
    47         K_gg=distributed(gplus(K_gg),'convert');
    48         p_g=gplus(p_g);
    49 end
    5043
    5144% Reduce stiffness matrix from g set to f set (effectively rooting out the single point constraints),
     
    6255end
    6356   
    64 
    6557% Add single point constraints back, ie increase f-set by s-set into the global g-set.
    6658u_g= full(Mergesolution_g(uf,G_mn,ys));
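The same edit appears in diagnostic_core_nonlinear.m and prognostic_core.m below: the cluster-only gplus reduction of the assembled stiffness matrix and load vector is dropped, so the serial flow assembles the system and passes it straight to the g-set to f-set reduction. A sketch of the resulting sequence, built from the unchanged context lines (call signatures as shown there):

        % Generate system matrices (stiffness and load)
        kflag=1; pflag=1;
        [K_gg,p_g]=SystemMatrices(elements,grids,loads,materials,kflag, pflag, sparsity,inputs,analysis_type);
        [K_gg,p_g]=PenaltySystemMatrices(grids,loads,materials,kflag, pflag, sparsity,inputs,analysis_type,K_gg,p_g);
        % K_gg and p_g are used as assembled; the distributed(gplus(...)) step that used to follow is gone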
  • issm/trunk/src/m/solutions/ice/diagnostic_core_nonlinear.m

    r392 r1623  
    1212
    1313%global variables
    14 global cluster gridset element_debug element_debugid
     14global gridset element_debug element_debugid
    1515
    1616%recover fem model fields
     
    8787        end
    8888       
    89         if cluster,
    90                 K_gg=distributed(gplus(K_gg),'convert');
    91                 p_g=gplus(p_g);
    92         end
    93        
    9489        % Reduce stiffness matrix from g set to f set (effectively rooting out the single point constraints),
    9590        % and compute modifications to loads from single point constraints.
  • issm/trunk/src/m/solutions/ice/melting_core.m

    r36 r1623  
    66
    77%global variables
    8 global cluster gridset
     8global gridset
    99
    1010%recover fem model fields
  • issm/trunk/src/m/solutions/ice/prognostic_core.m

    r36 r1623  
    88
    99%global variables
    10 global cluster gridset
     10global gridset
    1111
    1212%recover fem model fields
     
    4040[K_gg,p_g]=PenaltySystemMatrices(grids,loads,materials,kflag, pflag, sparsity,inputs,analysis_type,K_gg,p_g);
    4141
    42 if cluster,
    43         K_gg=distributed(gplus(K_gg),'convert');
    44         p_g=gplus(p_g);
    45 end
    46 
    4742% Reduce stiffness matrix from g set to f set (effectively rooting out the single point constraints),
    4843% and compute modifications to loads from single point constraints.
  • issm/trunk/src/m/solutions/ice/slopecompute.m

    r319 r1623  
    1616%Use package to set solution namespace
    1717usenamespace('ice');
    18 
    19 %determine if run is parallel
    20 if strcmpi(md.cluster,'yes'), cluster=1; else cluster=0;end;
    21 
    22 %for now, only serial support is in
    23 if cluster,
    24         error('slopecompute error message: parallel support not implemented yet');
    25 end
    2618
    2719%First, build elements,grids,loads, etc ... for horizontal, base vertical and vertical model
  • issm/trunk/src/m/solutions/ice/thermal_core.m

    r36 r1623  
    66
    77%global variables
    8 global cluster gridset
     8global gridset
    99
    1010%recover fem model fields