From 31c9c67a90772ac49992cbaa4e2ee692a22a86c9 Mon Sep 17 00:00:00 2001
From: Ycblue <yuchialan@gmail.com>
Date: Mon, 17 Jul 2023 12:16:43 +0200
Subject: [PATCH] added prediction from tiles

---
 __pycache__/model.cpython-39.pyc | Bin 0 -> 11562 bytes
 __pycache__/utils.cpython-39.pyc | Bin 0 -> 24413 bytes
 binary_fill_holes_benchmark.py   |  35 ++
 environment.yml                  |   2 +-
 extractFGpatches.py              | 333 ++++++++++++++++++
 model.py                         |  10 +-
 segment_WSI.py                   |  13 +-
 segment_WSI_clean.py             | 474 +++++++++++++++++++++++++
 segment_WSI_cupy.py              | 541 +++++++++++++++++++++++++++++
 segment_WSI_nnunetv2.py          | 546 +++++++++++++++++++++++++++++
 segment_WSI_noTD.py              | 569 +++++++++++++++++++++++++++++++
 show_npy.ipynb                   | 176 ++++++++++
 show_npy.py                      |  14 +
 training.py                      |   2 +-
 utils.py                         |  43 ++-
 15 files changed, 2749 insertions(+), 9 deletions(-)
 create mode 100644 __pycache__/model.cpython-39.pyc
 create mode 100644 __pycache__/utils.cpython-39.pyc
 create mode 100644 binary_fill_holes_benchmark.py
 create mode 100755 extractFGpatches.py
 create mode 100644 segment_WSI_clean.py
 create mode 100644 segment_WSI_cupy.py
 create mode 100644 segment_WSI_nnunetv2.py
 create mode 100644 segment_WSI_noTD.py
 create mode 100644 show_npy.ipynb
 create mode 100644 show_npy.py

diff --git a/__pycache__/model.cpython-39.pyc b/__pycache__/model.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e2536fa6c41592ad0cef8d85b9e4216ef2fd8ae
GIT binary patch
literal 11562
zcmYe~<>g{vU|?W6*O?M%&%p2)#6iZa3=9ko3=9m#s~8y=QW#Pga~Pr^G-DJan9r2U
z9L3BC5o5_^W@2Du$YqUU1B)=_u;+3_ae&#(Ih?s%QCwg)V-9x|cM3}iYYRgZPYPQK
zdkaGpZz@+RTNYmmM+#>zQxv~DLkd?4cMC%bcPew1Kr?fcpgThfPYQ1fLkcfcM97^X
zg)fD_g&~C>DkAL8kRp&G*usz^2o(`=XGjrB5pH2f5r&G0x-+DRq=>dKq=-UA#M~KD
z#8Sjt7*fQcBI520DH17?Eet7=P!S1th7_q3=@y0*X{d;#J41?0ifju*iY!z_%AFxa
zE=9hDAw?c4BJIwQqL8B4!jPf}6_IggNKr~rZed7KhKk6#Go+}bsJ1Yqs6s{LQsq<B
zvY93@7A;F<T%eGmzK}6WF;yv5F-y6bk&z))FhwIpGes*!yO%jiB~>L=v6(SSHB~89
z6(XO)7|ful^AZ$`ewvK8*mCnzQgezm8E>%{Bo>uqCgvnFg5;nW#Aab&U~mRSas~qf
zLkU9-LkVLRQ!`Vs4V2A{#AZQavm&wCnwg4HN*J@)7jV=tWN|KJWMrse$l^+2C}o6-
zbA$Pe3z<MkAedn#lV3X24Gfw*x41HkGxLf|67!N%Z!zWN-D2~}%u7uyy2a+4pI2s-
z0-=plZt;WoAw`LK#Rd7rsYd7m#woX0GV?M^Zt+zlmSv_E#pmT0<tFCD-(o9I%}mcI
zxy6!{nOJ;_GdUk@SxNj###@ZJD;bJ}85kIT>FH<W=cWdi<QEz0SLP(<>6hmhWfvDD
zCa3DV`8Wo9=%?i*7H8;#LsYMz@|G0Hs(6UW#qlUM@`J*Y1LQ$Q8%91xF2*8J1_p+~
z5%fi%2-oB(0%h(ZHU<WUB6bh~a%~YOhy^MWinu{69tH-6Tl`R;mF8up<rn3~7x96l
zKt3!I0I>u?1SkN(p;#mW(j$#6XrKmwLZTRC8v`tC#2FYELRK=~V$aMg$Vp62O$G%V
zG6uOA#4lE3U|>jP0F^yaOeq}g3~7wua)dLA8Cs68v@@_UM6m`lXmZ|Sb1p3|$<KYs
z%)r3#5|q+kGJzriW*Epo5F6wakdk5+Mo{`MVXR?jW?I0skb#k*hB<{Xo2gi@hB<`^
zM0+u$FqbgbFgG)bGc+?cF_y5@Fs887FgG*TFvPQ#uz?%{<*}ErrLd;3L3tb{Y#;|h
zd7LF|DeNg6P##wa8>rZW^0-TQQaDpMK(Z-ZDcrrRH4O2*B|Iq{DeNiiHOz1^z7n1k
z_7pad4RA635>Ro<n!<{tPN0M*g*Al*<jNGT6s{DWUbY&Bc)=2(6qXt$sGY(!3=2d`
zL_xVWg&~+hlh?0E78I${3=9laGT}w}dFcwJd8sA3;H0mRl9`*DSDcxjXQl8`0Ti~N
zD&l1Wh{eFjz>vwn1R_`%7~C`&Z*i3t6es2u<YeZhYcdrnff5I6a(-T!sixR1*5cBF
z)S@COkRV5VJUGq7-(oH<DZ0fC;($u4{JgST%$a$~w^&m0%kyrrf=ELMWdxy&A(RP(
zGQGuAT5yZGw7>vF7=j2R5Mc}=Om4B~mzIF6zr_M#++s~ED9EWS0wq9AmRl^vsX1x4
zI5P7JN=xFCGj4I_mzF@89AK}zWah49yu}$GpPZ9eTpS;dNbyCg3=9k)Tr3AlSB!j&
z3QT;AaL6RW2!UdZd`wmRkc6oR^KvpMU4pcMFeo{Kk|nrEZew6zs9{*ZP{X*8v6iWZ
zVF6<e(?Z5t<{E|tOf}348EaW;7#1+suq<S(WvyXYz*57ykg=AnhG79~4ckJ-TJ{=-
z1#C6!HEau+Y8h)77O>Ya*04g^95swJEKoLQ4Py;6l+9HGvIC-?8*DFQ4P!9FN=83T
zo+1rUvd{t%+8{y)MCgJDJrH34A`C%<5r{Ab5hftQjDdkclcUH26w!<ox0osnZ!uLE
z-D0XRzQt5w62(+u8pTv$1_@1YNdhiJ*wgZh$`gxHEJ1}SD3LHQav>lS4<ih6F;?+G
zgA}Soleq|#c#C8}wu930O14`Z@$q?yxvBB-x47ctbMsS5b3kmK`1r!o#2knWI2Mas
zK+fcaMsjj$P7W-h!A`RQ83}S9INhp%+y;s+4n{r^un0!s1u9!W5nBvW3oE?9MHgcV
zQwv8Fa|&|`OAA93qUd71#RW>E@ku%P$=S&u(?R+{7{msZ%pfz1Z-DD-%<`%zt%P|2
zO9^upD<~>cm_X+DvXn3{U;`zd66P#+P|4KG2v)&V!-QMKLQq-3k(UQ4|BCiO%>^YF
zWL;bs=5p4cnv13aYOYub_X3_2_7Yx5iJHP8&HyiiIFZUAa5QQ%-QvzpEy_#Hi7(Eq
zN(HrNQc^PW(nE?QK$W)uq?m%2O$xu*71A@yQu7ow`HDaxU*rXfI8YV^S9-U&f>R4i
zQ}aMgq9Rb9yT#`TZWJV^`sEkp8l~Lg^hr(3t_(``3BAPviVkq)h^ops<rZ^sW$`VR
z)QZfKB2eYDk_DW~qgXTZKqUxUKDZ<)vIf}=PC4K-1WrC6r+9;s4=7VHF!C{SFtRWT
zFmf<*LC9Ym92g>;Rl*>rKvD+w@&i<sfRYEe`~X$e@bUwkkBjUW7#LPE76pMQCa`0R
z>_M&rI}A*KT?fhpNg&sOk{l@GRS6;449#XmuBbY}J_KtA1vUs5gS2xn@`!_jpDKAn
z1mTYOy!?Pfa4NvaC7?zOC>V+_;L0UQ(9{giCB4ijd4wf}6*Z4wlZU3{ENBkd1~n6&
zL%@0vsUK!0Jbz%5hnfk=9c=L2!H$$WIFNG3O2#5kGJ@oTB41F9_<_n>XtIw234qcX
zIN2BZgSepdUK9Xg1%e1rS^%f+qF@j=1Vn^_2vCYH3InmgF^fnYAoD=og<|Bi3rfLU
z;B?FdO}i)}oK>>eVvhV&ToeIvStN)6y9`W#Qy(bt)}tg|8LU<lOva$V1NBo7$rvO;
zm1Hama*-({Cliy7V=&S&G_66>FJlT5I4vQ{c;*xqr1XnJ9+K9;tv*)2m*6fzG^jNW
zE>tx+AtfisYDm=x?k?a=Lby^&D<~?2K=l_RA0rDSg+Rz(RdQH^fmVrP0>~wxA_-ig
zfzu;K6zCC56`=5Fftkd?$irO(5}`_}5C;VZdZI9_FvKX|KwU3z`DTWr*HUy0DSa@e
zFiA0h+X0X=4$NN&Y5^d(1;C;;U^0b0g$0_Pz<esGg|tH;_A?=yw~(iXA)X^Ir-nI&
z9X#+M19ds16ot72QHrvruz^!Hq!h&=4@ud1kW!QbUW#%em7-jbQuHM#L4eyBMToQy
zX+L6=rg5Os22}b{yEL5&N>8B1GOQ&a#K^<Q#VE$a!vrOngg|YB_Z-4pj4Vvxk`+S!
z;NY%Oz>#cdm5+9UTnh>|a6Xd9Wjn!~1Pab6Fta!qc~pu(B2>vqNX?b_0vwG5P<aQ=
zL|Ir0*DMxr^BG)lg1h(726PQWJbMXq76&*xG1f4|bCxh?fjZMnDKNEMu!ckpLp(Q%
zS{`Uo3rSvx<fh3BDQXi?(-)|sz6I_t8-nZWTdbfFEyE&EwS9}VC^e_l5F%{^kuib@
z8sV(8_k#i()G7oeGf)!aV*-&?(nwJV4K3Vh2Q*#;ZeD_#5G4#*j5UlPuct8gGK0IB
zppGi2i^>A(J}v}xT^X^c0(DqHT~&63Do{sLlf_Sy1(F-Vy*D^<i?b-TASW|9u_SdR
z<1H3Yhg_567NgrO=7PkOB2bzs0yS8R5<x+j1|m{HL<)$&)kruA3Jy?Hk%3WwQ4EZ$
zq_6}9!GsKow|yYPK$(Jrk%y}YBm#02sM>>LP+|jdNaza|sbd?o0$BqxtQa%^Q^J7U
z2`pi*VT2Fuuz=DVxXrK03Q1ug;~<qesC^FZ2jEOCmtj5vrIsoYq%j%Xi38*+aN+=^
zGAw-${H?a@2phQ=tAvqlBp3%EpIw6K=V0Vvfdm~Wypb`;fgnC9aiEDU4nWxeWLR+t
zM(F_=I4m-ORAdWSQy3RAmaxI%Wg!!&49T;Bb{e9fsyMK!5-Z_cz?H&W!VT>tvmh$4
zm8_5o3{sdN>M9;cOoJQ^iD`JV?G~qRVnslHevVNJxI=b}H#e~&z92t8CmxjQ;nKz_
zMMU-j?t%gw)B^`45m0}XgNf@e2P>Gx$n}qdwTc%sWJi8_S;<(Ghc64i1vvne9l%u)
z57;t-Nd*)%cVHSh7<mMWKqA!0uD8J6^~2dOg(n)&h#7ju1&!KZPb{EptjPq4ZcPqI
z{1$?O1+6WT3lanMQ6Pbat400^<abE92WpXXFmnB?5(5Pt+~MR$SWyAU4Y*3&pCFsz
z(Itv%C&73FdHEyE1P(?X_9Bo7RpN~c<P^|oLTU;|V;df61C`T@azH(LXud82g)g>v
z2UR(cc+UqZAu<R52Zbj}4hH307Ni_3j0hT9B?~@A1_p2^8=Q-UkSrvaCP2aQ599|>
zs^Vbe;V%M-V5A9<EV$SQ4QjxKVN=+^1F=jg%q^ffB*>gJVjz~iNS}d$!7UjS{~)~}
z3}S;Yxa2>9S@N^gFvK(0uz)6xihOEVi%d#bvREO_BT$#Bh9RD@a8?OR7N|A=HJd<k
zY~X1g#yr0o7El+csHKLrC<m$!(#`};FhTTz#_-YgiIs3H;7s8x;i_Q)O--e6LF$Ma
zwxUfX+@M-VoS}xfh6OxL#givg!iz2jDqdD{LyDJM97&lu;OW;8P3BuX#mR{|sqtxv
z$tC$kMVa8C@x+|W^t|}w{Gz<nqT(XZ%$g=skq=0Ut+=EpGbI&VA>HCEDS{e=IVuGT
zS4iCi9vTHzP`5ZjAyY-EMWB)iG6RX0hC%&oNRtIr#%Z#H$1g$ihVjW6@rE#lQBgQ3
z{ew*g%{Ub)gSc3Ruw+504%EN|RbFi10ay`c2^h^J#>~Pb!d%5&T7XFQplAmr0}uwe
z8q`7uH+QBXPt`EiFfCwQ$dJM)$xy=xV@WWiFiJ7hGDGE<;Brio3{W|i5~eKX8m1J6
z6oytPS;LGlp@vzUp_QqI1u6nEfdwRz#sV@c9n!;QLYfL<tpQDiqBIjg-d)Lfi#a7T
zSCbW-zHhOB2h>?h@{5u)ZZRh(mVkp;lMNiYMWETeB1cf{@)egP7L|DBrKDE4Wab*(
zLggFd97i?>1v6+anSqguRfbW9(T$0Vv5Kp-05;U3$>ipSdn_50R%Ad%fU+<LBcBx5
z)zr!0w>U}*f(sIhic{T^LD30{RuBfU;kg_%)`MA9*MPe{QVc0f3qiS)72M}z1V!;e
zP_Gc&M+9|@q!=J%3L~h;n8FIq>zrt+K|My$WHO@^18719Y$7*U4HMWjNWYQIuP7dr
zltFQbn4;BWDk=wwf^s0NAK45NK-3YSR0{4G-QspiEJ@CQOmRT^OQ6hli#;V3)BrRp
z0`-|7T}op}m(qA8G#yldjI0I`Xt@hZ#&QKk7pMckz{tlW!YIHb$0)!g!dN8;3vKKf
z3e+D54Qzok)EaQv3ZC<1Sil5YjIsdSsRXkiokvDc=du}vtOaE~c6io<vY^=ul=VO&
zpp3(~0Lp^OL3;b>84siel!*};4<rK856^hw47K2y6-{PH`3WxkG&yeZ<mKl<%F)b{
z%A$5qz=5W7!694(8WM#BKe)SE2dds#(ehhSBS=pZhyV|5g9-Tfb|5H-L2<{xXu=4}
zR5nai0`TC4)|#4(c=8n}jkthJf#xfoB9I96@)e{pg(qKa0LL(L-?fHt?a2nNJuz!R
zHUhOLL?39@8@2Xi%Y*c=I7_%uYtIrMv|5rER7(~WgQ5yLx6%cQ8*nZI*EOI*qo@)j
z1}d^}*J6l{cLzu@mi!Y1iWXR#3|cWUK`SOE2_`WnP)>q`FZP@Sic(PG0OzE0s8tg~
z79+TI5(chp7*m*Pm=>@sWJqBKRU$B!1Or?gl>QmP;*6k*DvbrS%mp;mlNW(z0vp@}
zHjqlV3B;NVUO)pfxr8-~t%j+FF`WstP^Om&yyAf!yyAhmh8eWF2b$MFjSp~!D*{c^
z6oKZXkc!9}P+W0<s|&~!ENGmsC>s>$pd42Onj<O#EnI<Ac#yeUM1_a5LdyX~52Sbl
ztw?ZUieM39<YENnKWL;7lm9@eClX{5D6%*h`Gkr<A{hA(*5V*#yuuXQcm=3*1sPTh
z>L;MqXVCEqE$~PtBd7)f4d#JHJ@brem{VA?nTp&>n89sU@Wg*FxW>y9D`8*2k-}QS
z2_2hYgS0zA`9zZ)8bjc^3N+9Oi6+D-1s9|j3vw!?B?!(w;F=0CA&J)h!_}Xy0R<zZ
zg#j8F7h&XJ6k)0o0Il)K&nrV@5zyo~sCoxs5E~R6;4A`)Op*tyT0ypgA`jeY;fL7<
zjeFb;8jzQ(VR|_jc{qzeBA^8-xrv#1@$s<nPtX#Umkmq|3`ISlBn#$(!w0mYMU%M*
zv@oW~3M2(8el*n}V>X~feTxI!nA6M41J6@L3B$O0X{C9|C7Jnoi8;5p5bPpQ#<;}|
zR#lb>TH93w3Z7e>U?utaImJbwpt{A9n^=+oUdsqxGj)pvGz4AL32Gd&`GS|!fY-o)
z$CipfO?2?g7<f=0JRk}lkpqvlfIIHso*E>3f~p>H`Ui(aksHWJmZHSGlv}Le^;ox9
z3yL!HN^Y?hXCxM+-eLs}|K{8Rt*<C3EkRm21g|7-ao9i>`PhM6UJME*4kivp9!Ai3
yHxCmB3kMqqJ0FLTnvkrJtdM|^mXL@LmyoCsuaKOOhLEU`l8^uw6DW6s5)=Sqx92|q

literal 0
HcmV?d00001

diff --git a/__pycache__/utils.cpython-39.pyc b/__pycache__/utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5faec9d908ffe50e1bf850ec5159f734e3b969cf
GIT binary patch
literal 24413
zcmYe~<>g{vU|^WbwKZkOTn2{6APzF-Wnf@%U|?V<4q{?pNMT4}%wdRP1k+4WOeqX0
zOgYTCEKw|s5INQ;R<Imf6dRakk77?@NMX+5$mNXUgsS0+;s(p{<nl)GfyG#I_;UrK
z1i)<89Kl?nC?PPLF-JI8BuWI#X3G)H6^jxBv)OaRb0wlAawVfA!F-M!sa)wOX-0-r
zrYxCsh7`_4jOh$1TwszLO!9z9UNFfACi%gn0GJd6lR{upcoAbdcPdksY>G%PQ<Pka
zXo^@1LzH}qc#1>|LzF^_WQtS^LzH5Qbc#$1LzGgAY>He9LzHrge2PK~LzGHNWQtOX
zatmXWYKlsVY70Y@T8dhVdJ998dP-D^Mv7(&W0XdUR*H5DLzHGpbc#-jZVO|SR!U5Y
zUW$GTW0ZDEY>GjOVGCoFPKps!m2rwm3qzD{ifM{j3qzD%N?eM0ibV@!lzxh3id73k
zltGGhicJeclwpc(id_pslu?R(ibD%SlyQn<ic<?ilu3$nic1Salxa$Qid%|%3uBa7
zibslP3qzDSI36JJ>6K%VYZ+w;4o~kKt6b|SYcSg<$0pY{%9fEKB_WkD%P!S6)jrie
zjY*Or)jq{<4s(isN<a&1lmn_<O5hx(l%SO07M3W-l*E*<l<*eDD5q5Cl!$Dm2~0(P
zsV>b7j0`D^!3>&7FF~ozPm}Q$PjW_Ta&~-vX-Pq8i6-MMma6>xTusJXTrP<vi9Y#>
zDXB%8jJJe?OOuLIOM(*fQu1?y6LSl4z{0FKiAkwBnoPI&lQQ!Xiz?$&GIJ73GV}9p
z2|@+ZGIMg`GxBp%i!~W<v3lkvrl+oCE@EV0U_cNnnP2`#6M#taz(i5BfJ9*eAa)Um
zSjh~g5c*&OAW5+3O6Hf{3=9k}T|tB}0|UcK=9djH3yQ?S0$?*<_P|6@%m5j*lJOR6
zN=aowYBI<NpdbWcb_NCp76t|eXHbTXVPIe=VaQ^vVMt+2Vd`b7WrXsW!91oCh6PMD
zj0+i4m?arfm{VBNnIsuvm};48S->((7&5H2Y&Fa^EH$h(Y-udP44SNdFF}@rqAIZ>
zvl#5Rm;e9&|F6k>iz%<*7IR`=<t^6ojMSo3P4-*NnYrn=Sc>w?i*K<c=jRmPVkyea
z%ma~$6}MQD!R+MR#EO*+MOq9D48QdBGxBp&gG=&@4D~B>67%%S^NX^J3lfu4_1%0N
zgFW=qauSO(^h--JbBgr}DsS<nr<OS7m*%Bp=A}F3SEz!509>>&@-gx;N-;_?73nZA
zFrb7ND6Cjv;nfBWuN1}-CU6KbrZCqqrZA^4q%gEHr7_tsfcaov3Nwfg4Nn#kh7@LT
zh8l(xC@YPrgdqzW$|=k>Of?KO3}E#vH4M#+E)1~(wag`~HB2>(DXhIrwM>O+B@9_?
zHB1nDgBdj0{8lo(WClgP9*B?z5wf7v!o<M9pvicPv$&+FG`XY{6tG1K3=9mnSgTTt
z@{4aVXXcgMVlB<gE3vr6T9lZVo>~M-6SvqCixZ2A5-T-XZm|_5reu~D7a4%Gu_Ptt
z<lJJ+1P3{U&<2U~rl;nm7A2OX#)A}q98oL)N??q9i~@`TOe%~#j695dj764sLo|Vb
zfuV+B0mDLuTE-H_ET$TU1<VT>QWzycq97I%DCMOwmoR3rq_BWOqJ%XC6e>*O49!d~
z46!1$%q46!%#sY?Fl5eVvSBE+sbN~cp27-B(+in{8CJ4Av|(gm;8IXfQ1DGH$;eMp
zC@oG+Q7Fk*fcZh8C_PCb2UI>N<R%srE2QNYDU_t<73UWv<tG-UD3oOumnP<9R)Nb4
zut8P@iA9OI5Iwnx1y%|k72yyb$ONcnh2;FalElos%)E4kvc#Oy)MACSqWoM11CZT$
z@yR)f#l@+`5c7&sOG=CKtQ3qZKqdvbJ0Y2dWK42?PJU6bLS~*qNk*zdT7FJWemTha
z{Gyc9A}a-_oW$g81&yS{<m~h!P*T^_Q3y&+(NS<ON=?nvQE<vBP1R9wu1w6+QSePn
zPt7Yy)KQ2`1(~9w;8c`fo~NVW4^9<23IU}>1v#l;*MT!`5h#5Xfnr*d=@v_IW>spD
z3@F*@gNh@LTg<tM6}MPR@{5u)iVQ*GHXy<nM3{gQ3>(Dhx7Z6(i*id#QdhFv;s6JE
zZeqbLPIzeCVk=55F3l+^vH<A><#VJ&C!U<2SC(2-5)acDUy>hRl%8Y-&&q5ZOlpim
zj6zIBZs-XQl*~C`S$Q8u!pmZYBs);H#FFS(YZz0QpovZbmgp9+FGNmw7A1^X94Txi
zj9Hv1?7d92EG1kej0?DH7*d$BnLtT+AyYGB3{x#@EgLu)v%yW&s|6)-o-E!J#uSbe
z5St~1vzMiYse}&%YglWTvzeM1LC}Vwu&ahCm_d`P$Ql$xpaK>pYFLXi5(|*xrbrj0
z3{*nAWME`q&}6yAoReREizOp7J);O5NuX%d<S()VsRNZ2koa>4apCb-1Zv{lV$Cbf
zEvUT3lAo7aTx0}Nzy=~r!4X^pu5oX%gXqk>5^%Yq$zBAiYKmMy%0Sf_Bwj&9OB63S
z7E@C~N|Q=+GH>zZmF6a;7D0HBNC!thxVSOI5%clonI#$VnR&$}iFwJX@oAYw#U+r8
z<pD|)LZBjvje|*vQHW8DNq|X;QHW88sVD$FZJ-qMWED!t>4S(usR)#0i)=xm;sheV
zJ_QqC?`q-lZfZqxYC#FIKS7mvF}NB-r2RmU|3F?x#-N%36c@#yOpwly!ng?3x?=Lv
z)V#%-1S&UfF&Cw#++s}!6&1Hwl5$E@Z?Pm-Cg$B@2Q`KAN)m6eRf3C(TdYZ-qT&`?
z0l0{`#g-2)GC(zXW=U#MFvvq8AR-h*gn@`~5D@_)B0)qHh=>LeAm`IX2RLxi2yj@7
zrKgrSgNjw(!~(bcA|FsS;G0;G4N5<tFk@gWie+G6cs^?;2;8(!=0plrQ0E5}p5TDB
zVFI<Mni*<2OBic7n;C1lY8V$VEo7+S%4R55@?uC~Y-Vg?EMd-KS-@Jt(9E!qv6ef9
zsfMYBDFs|}r7+d7*04g@C2U#j3pi?+Kt)Fl(?X_No)XS1t`wFO)?P+Nh7!&N+$n4e
zAuOI61}KX+g&oS`PT_#CvUqA3Ye2SerEp3xq;U1JfOR9Q<c6zcBA}87qH+OG4KtL5
zYz{9}j0e=3$>K|4l4Pi1gwPTUDST24H4G3QnkZC_I72HFSOq6kAEzV(l0HmPkUmaH
z29Q2z3yfJ38WLGNH9QEv)NrKm_p)O6TL9wTES?$`gj%d2BnVf@N<gI$MCAgW8d&VW
zLnw<sg(-z8MYxv<>=J<*SX>D%1a);7f*CYL{C)+8<mW5oCgxQ_>i=Q|T{{K8{1S!K
zy!_Ji3<XFfs*qTgn3<E9l#>c-2_%;mm*nR{q;eAr6crp(QWT2wb5kKg#fsqS`z5IU
z(`0%HYNe$cgMos>O2%94X_@Ju%KYU=P$ABopO*IW6DVVXYWtPUpmuj+!7Y|DaO=A)
zH?iX7XHYZq3yAm%QU|X7S&A}1>`V}m1u};<F{dCS@#Qy=vhN_`2dI_6bc?OL7}P+&
z#g+l4HHE-=>lSlPY96@$xy77YkOQe}_<S;pOHxz7Su;1W0Md$O%1yk*lbcwaothF4
z?l;_GF32gl#g>+tURsoTi@mrssURo6<Q7|IZgEC_d668*MWCAO7C%&~I6g5YtF*Y}
z7JG4GS!!Bl`YqPvoc!WcP2M791_p*G0Z8{UpeQvZGa1xBj1n)%FD@x4%1=%$E>2BB
zk`PME%uCEc2n4x1-C}YNiQ+3LN`<R;4~Y^)<rk$V-Qr6t&B+M@w{!v$OEQY`L4n2t
ziAk`Ji*JeKCRW5l>csf`G)R-D_!b|M8sEf%TOhlj4)ske&PJ~{1wo#40|ySow&Fri
zxdy5n85r3ZIT%G4`Ivbad6<Nlco?~$Pz+2%NQhVqqXd!|T!x8{iHotQ16139`XaD$
z5mZ3J3&nMamKJjgV-0f&BLvnkr!d0WG|h}Q3?)p>47DsJ%rz{{jJ2#a%nMi+GSskU
zGZeddF{Cho3r^N7wgv1Ubqg75*-ALFI6(zxFJmox3C9Ai8uoOC6qZGd3z=#;YB&~f
zgECbO`vRUCa6wqZk;PlXT*H<EDqtBU7*g241tG@*K2Qgjk)ecR0Y6Ab3Md=0`#~C1
z@u0+$1WLFp&tZNm>H^gT5Z(v-B2YISoV|+DK^-pvP|vlX65R9wr|hCKkQv~XRXK<S
zYDE=QfLPTaq6QT0ERGea#kY8hQ%mAOz2x}(w6t3si76@Zi502EMRg!`4IrWsWCLfE
zd;vUZhG&*!<d>E}noKUx@r<HokfES#9h_N}dW#F8yr>0aOe=^0b!BfcgHk_JVg-7l
z5d|e0q+|_Frp5C>2}cU#b4CtE8764rVd7yFVB}!rVgx0MqABP}0hA0tO+;}02nsq-
zy$5RS)-X3S*0R(vfa^yVob@9ss2&8>g1w+9XIsEg!<NpF!VHdP_8RsDoS;anVFN`!
zsN}6-&*H9Os9{ZEsbNWBg-0{n0v>2Ivn}8S>3~HuJcf%pL7{?ZOM_#RkJu<I>ILcU
z0}=fo0xixsiY9_YLGe;F0mMR$hoVU!vB@9;(LMn;O;oT(1T@xR@x1{QFJj<$!5Sx1
zL2&{a!vHA(VbJIhsNDjN6E#Lqn<a}8G|~ggkTswT#0nanNM}f40uOjHgGm-J$qFXf
zz$81E<N%Y{2UNLB7_wMWczc;@*-99)ShLtt7*qH_Vl|AQu2Tws8dHiuFH0?Z2?Ho*
zYuMA6Kv}znEk#h8p_Z+Lvxcpiv6cg(x`rd0p?HfILy8c%FyPAKhSajPoF%+jd^OA|
z!o7^OTqV2<_-nY*8B#>RX@t9mdw~Eb#%s712-bjGIyKx2gdi=Z8tyEi8rB-l6vi5k
z6j2F=8kQ6>a65r_fp85IC_R<%E)W6f2B#NsKS)joHMWbmLD2;Y#UefsiyuUQs@5Vw
z5DV1rC<4Vvkpzef$~8rx9#|2m;05=gSF#j&f~34a1gHZB?utQ*nFsj}MQAA#R2vuN
zfeHXoa79>D3DQ>u%IU1&ahoD=Pt6x3>IWh~Mb0hm<ovwk#FEs!#FA7<NrbBqB9MZ4
zz;z>fG6q#zMeQIraDdw-p!$eEFSQ&p^a`oQir~c!?qn{5n#}#nQj2mDE1~t(eo*oT
z^_CczgqXM(*)ZWZCN7q5Y}#^+e2g4Spc;*bg@*}JC~<-7t)e-gEPz}?fwBV#gR=lA
zyubr>Oeu`LjJ3?*!A)jJ@?XeQ%TmJv8kAyW0M$t~ETCjj!?J)Cl7vba7O+8!76wp~
z291aZGpuCxD+1LU;8JBJM-jZN#~;iqSs}rUC@65244j}K0}oL%F)}a|gVG=aBdjO}
z)!9W0Kz;`K5tQXY7?fB+80_Z`Mh1oy##+`Ah6RizOrQ~p8dhlrPzMA=rZIztdwZE{
z7_yibGSxCbRWU;4nNwJMnQIuqa-bn&P-rlMx-H<s1C-T4g*35+1Vak;Sr<^@!JZ=6
z%LFPOvN*ChQ<zhPN*J=(OF-Q!?u87^j48rtOerG0EVUdZ3=4Q_*lRez1qgc$dy1$u
zLoIs=Zw-4hV=ZS5Bd9%4!<o%c{LhOaMGRbk@MZBY05`&Gxk?1Hgla%O5(afzL_pmZ
zVbGwrG(!z@8dHjRFJmotiQoc}8t!z46bW#_!c)VuKonFo)Nn5ls{wb6YIqiiLsCTz
zPnLKMTMbtVa}8$-6S!cJ1Q#rV3nZWgi{JuDkZy3nBIO6FZWtICL>wRu4^Ta%$y5YN
zvA0;kLwlNxMGHaMnkr>a(KL`z(?JAUF#|4Pq#z{>Xix|21jsNSyuc}%0n&(8+<?b|
zKxIzROpq+N)q}IF0ky~R=LBeD98uuh;!Mj-_ejl5&nN-6m81Am^2_szlM{1N-4c^a
z@{7RbP|+-q<+DKq5yg%oC=Vf;IjF@>yjx;%2@fc5ftqX#j4X_5j57F3ARZ;85=aHw
zEP|Fmi_vo<sQv);$iTVr0CZvjtrBK#233MA;7X9ahH(MwLWUalY=&YJc#9Z3NKnEI
z9!lT<4G@4+F*hjff>SX!XiO8DidlLYYdK3e7x2_@rZc3lf>SY94Hu+ARKvM|uZ9a;
zws9@sho)k#EdCnS8jcjk8uk=sa4Kd4r((_p0?<^<xd735WrtRe;3NgEui!->q~;QW
zq$@;10u8Gafs+b2<xrAdxWOgoE#~6Xba4B(Xb~t8fGVgc_TtoZaMy^~R+kVcU7)50
zBTzyB)#40{LhzQa3ThNDMUP@oHwc8mQ4DJL)i5q#NCC~KF=aE=veqyzU|h&h%LZz%
zN;1^2)v$s(L0QZ-%qh&^P7Tun7PL+cE2zk*VPC)o>(nd&b$vL&-5<6Z(A*Mh4T}Ur
z4TA`SIKx855{@kHg^+d((*ho7lrcdf99;0Q`4xe>?~rB<BDO%0r^$DVxwtg92t2`v
zzy4AH1vRL&xWx*dbAU8Okh(13xIyG)NE;XwH{eVgh17+0&M&CM-A<4PMNE*p6Rte#
z0*WOOP+bSgh~OD0NHai-k&Cft1t@+%MJh-E2!kp|5C+E&^&0_2b3x$(>NOS32eDAw
z_Tc(C0OVlIx;q&(91U_I2!jd<5C*$a30!U0FoGr{KyzK7X3GL5Xpe{y)O`kxE!Hq1
zTJk9@wQQh1R0?Yf8+hc9Z2<>#l#p!!XALObv88Z;#?WfmYM?B%(KNONTq&HOqM?Ls
z0XH-iflL7x7bR?2oHdL!ETGx86z&urZ~=j;12R?!(g7JEB--9A&Kg#z$$ZceJQR~t
zAR~BeSv)C>DU6_|0;Kt+DFiKwKqL8@97RE(;3qsP{Ss6PX)?V8wdG%eI<4R~+)Gev
zER%(afuSfBG>`=<MQ*X@r6v}o7MEyZ41Dos=9Q!t734$K7Q6&iL+NMFfWRGla4{Ig
z4lP=Wib2hJ-XgSO51gA2Wh5f&)q;Eh?pcBcwQjMNWR~Ql7S)3cVF%~1Tg*90IZ<qx
zx#__rMNw=yNjbqKMMa>VeiUa;QVysLOihX61P%5DXJn?8pm+O4@*xfHFyz_JC{Pkr
z10_UoPhW_U1w2rs0Ui$GVibYW8c<fzW>79ePQNH40A7p?44{FZGKOLaP$gEwEDoL-
z2Q|o`^S~@M%qc9{OhraD3@NNBtZB@_ARTPkOhpAX3^gn@OeyRV44}$Q3e1DBTA3sn
zYFSG_y;z1C)`d*9Y@qTKuA3v9sc0Pm-Rw}^>=4~4tTpUVyE(I&CNP5BgV%O;&}@A+
z^905s7Klv{mqGN1LtPAUXP!U}Lkd?7OAVtG14IO5Hwf1-)UYpPLihmUh7#r!rV<v=
zh>18u3b!}|D6~>ogBdh=60b8sXSSV-@{5ag!xD2cQ^2#-y1@lGnI#JOX$qNnDVfQs
z#R`eZ$@xW~wGW`#=d}Es6a~oOqC#R`ib8Q}Nl9j2I(W7>v^X(6)e6i6&6%epmLz(*
zL@3xQ<Q3>8f@gX(Kw`zIC3^AkIjMQ^@fw<%aOKJ%WuPg*{9L`_jMB8UoK&!KPnQUU
zBG3eTCTQ#$Vq9WweraBbTYgSTu_nS|kmV&siJ5uvnJE=I3T25o5JpLAaS4c}V5<O{
zx&e6{q!sMpVwhdFASZgdMCd3W8E%Wj)&b3)r}*XDW@;Pi!2E9u4F`xzp_BYjldKdR
zb8-|w9C(lxm*f|vrhxnbo)=XB_r$>=0o96RpOr#Leo0~u)H$Gt0mT7y_8+pE03_g-
zZ>0e84K#9e6qFU59fMp^1dA1lOA?Dp6hL#I3WnhLOV3v*EdWI<l8p+A3d+cif%?r#
zAp{ht3i=RQJ0!KZ1kMRd%z?8(653&jIS?~o3lvI93vxg+3wgzv#h@eu@`*9XwFQ+B
z{}(H0fMX3D_+^PXU>X!>pcH_-Bm!%~ECwZ8u#K85xA?#Y#e-rVWWX&h2p`Pmgs{sJ
zb8fMMnN|GmnPsVYu&7k9tKzp&$ONsGNX{=RN=+_NEZWY%z~EQ34OEbV+Bij9L98tx
z0@UNG;)SX&1LYz`P1alN(DV&%xZdI@PA#d(FEUKI#T5ox&EZ;9lwWj<xwxcAlOu{9
zlrTJ9BBHpFVjzkQ5~ZLvC^&yb@#U4~#+QH+LOe)A6dNRfqBy`_0O@B3`OecN0=;w=
zgr)Vc#2iRcF9a3M8la+?Pl5?F^1;Q#!KlE<!-$}{7!??~7?qd=K*ck2(H>CY4Du>S
z69|LmH{nen&_r|zcqv#3BWM{?I&(AQ0;Uw^g^UZBQ&<)<)-tAmX1UowQ*J5j;tVy6
zDI7>_@KjtdgC>_>6<1zaW=dwFZgFmA6_aje6>Dipny$qyMy@Jh=hC90)VvY}_kd7^
zQgA_|0B(L(G4XP}1Ucm;sKU@>DFQjIXg6qZi7h2HIX@-!7ArVyfEJ&W=4F-?fmait
zHmYt36eJcEr^Z99k1x*6oC^vgaCO8e$5?b2<bROAkukh+1u`pzv4o+7v4m*>a|sKm
zzqOEY0oy``EcO&8X@(T$W~OFFafTEYanLLQs~^HQL5{vK-+;UF4J~?(;HGsIvx2Xa
zCI{FfMa#jyDJTZ5NV~-z0GUy`#gt!si!D91q#!fp7I$uHZhlc^d}dx+{w?OB;$oy!
z0CHXtcvzSZ;>#dMU$DYOAU}eFi-ED|6v&6jF$`+(g8UC|K7tYpB!*i+3#=FyGBkr0
z{V{<?ds-N37+n}ZD@<yc!0Ss`{gROl0<l3D)Q&0!wT@~SvKWFHRx*NCX7U!L7M5lf
zr54Ah7bT`>GTve=Kyo?Q(Hu58iMdHBiFV5w85lrwhzwOS$)I9C0JM@y&n727IWec$
zP7kIG#d47EKz4#H2ld0!8EP0}g+L1y7cfF5*%vZSWGZ9{W&qo($qdP|ECogRDd3S+
zO{QB+dIm*jL75#Kl3)VtGA^5(%;J*d{M-V&6^slFMduk97^>v3yA7sLlNmJUm{XEk
zbc+jIc&C=678MsQ1GUDOZ?Wd)r=;fG5&%>22>DxVU^a5BLR^5bY#AsrL0)A5En{LV
zx&ew%koRFp78IeNd;*S8(6}h1-303Br!ZzS7ny<jm7rc`EjSs2y7Hh1UBHq8>NPMf
zWM*UlMQ;sw@kgROY-Jp@!ph7|cS<Zt&ahI*1TFed$j!{lOacigXt?F%Czgbyf>y<8
zf@+%7ijtzl<Prt&awBjhl#*JOnGBkePRj=`p@Yn%f(vt~@!%CdPz%6pWD9)q^U{&b
zPXgC6pyghW#d`U9sk#~9negO%P%{Q%F05MeN0v=Zfy|yGnGUWiQwvKIa}+=gV1>kD
z1(;vKZiiOo`FRS(iMgo?5U+r(01wG&vVv35EzZ)s;=<C@)T-1Xdr)rX0<Wr!&&kYA
zy~R<SoLEwlS`@Fze2W8ScoY}dpI|>n@#W{GX5^Q+=0SV_PO1<BoD4ukynkM*2SUXf
zP-+0p*D$aNfLq#pj4X^rcR^_alm=jF0@Q_s7h^M^GdhqZJ|zrUOf{gDrp&dhpw%Uy
zp=@bTTbZSXA%(ewwT7`7w1SnTm!+1ihN<XQ3TrKU4Vw!?Y!7G}HHN8{vlcW+#I}$j
zg-w#7mb->K1=JRAs{zf~FxGITux2xX%&g%8iGx__ObZ#s8EV-JooYB2urFjtVF$Oq
z*=x8KFoRa5EMNxpCKoWLu)@_r<k-O#4F{Oy1d~WA7H}+NsAbP<E8$$gmBO0NxR4R*
z&Jw6Qd1|>pt72-nKq?{R0v?d7#TgbdrLame)bc`xgG+d8c$*osStl?SIn}VIFhKaN
zOsou&3^lyw49pBR427?1c)>F4&5TePc8E+3Z!LS_s}j&~8T&%U8g_74GA!V)VO_`w
znrT`f2wEFc%UZ*-Kq!T)M7V|(RAqpA&o!VHJGY-E8#qCMYloMhlDTL#s7wNn!$1~S
zg2!8Lu|bj`SXPte7Px&;oLX{=3!EL}L5Uc&gaN#~{T4T<Spi!93@U^`r8LBjdmxiR
zhJtOYlCZT?2nUa$LYBBXyXxlT7Z+P8Xo^5mGaF>l>n-*)P+m7S0+(k+U`K+I8zc)b
zr)1_9d4o&_cU5PAvH@pCW^qY=dQoC7coheDQ12E?ZfauQEiTZKSI|0!g34R0U~_LV
zmE_-I0abC}YEhG~=mBWq1++nznNkrYmROVuUWZlekH|2oDMj}|cCqE;r>CbD-C{3?
zc;yxss3nyHUjBcJ3+|BOTdd%aD!K#G#8I3IDcGWTAnh}V@wXV`qj-u_(>>tM2UiPV
z0-Pm<(^E^JLFSfU<m?IxuU()_X#&c7Ok#{Y%py!Ij4X^Cj2w(Sj2w(AOgxM-OiD~#
zOduWylLC_vqZE?_BO4PRqX?4_QxRz8OfqtT4hmUNn+IH=gGS9DgF2u^`Poc03=5eS
zFsCprWXxiz1<lhiW-~8f1@UV^1BFc4OrZ8jQ3hy=G=;eYG!McF%I?`r3mF%1Ed;k!
zKnXB~6*M3S+EGyhE)GD0N8p8LjM>bR;6>N9EHz9uENP&XrEFC%z$Kq^er`c&NvZ-U
z9-##)s4J;ZoSa{jS`2FVL%KZ(K}ZikLBl9TQ$YjNugokd25X0Qs9>cv4imwJw7RaL
zx&mkcO=@v!9=L}AZZGFlD&%Gs7lRf%gG*Ee4Ui$=XmrU;P7MaTNmB=Gn*wMgBN5bF
z1Jy@h=R%Z&hF%hLazGj&E-I1-C1g;Jsgj6RRtR?t^7Hg_S5S^s0Gq5)tZAhH&P!G9
z3VHb@3I#=AS1BZxD1gk*%max)*WVO_D-y`MYu%Ef(vl2N5G6w!yr4u{oLZu&$qjCr
zASQ5&UVsuP3rHT4-xNVkdIKWB&0mhhqIAeoC-$@)Pz95x$$E<m6y^{=7rh1L9$_q@
z5hVf+sd)F0`21vuJCak2qxeBekwOAe10m8ps2cK3P0Ry@62yvUpcD_PWEfbb82K1^
z7&Sn1jZ8&9K@|~lT1J`G2m=lFfEV|J8Yj?Z3X>!Q6GJT%EcF&eq<}X+q%dZ)+AyRr
z)_^uiFlIB@fZM0wbj|>7(0~>OFoIIHIYTXTo(^c?2uwDMxr8N)HJhoZ3e*w=i!$bc
z<_H-f>>AJ<EvO&ER0}eT4YX=#A*kKUp2Y#l4H67#OeO4Dte`Chp!vF5mJ-eyre@Ga
zj0v@@paFW&P!wp3ha_mgp0S1%iO*ibnZi`V4Bn2AP|H!nR>P6a1j>80oS+#=C>vCX
zaxY}4;RMY@iZg(lYVT6mYB+1SL>NF~A`CTLAR4AZngLXOfj84|fZUV@+S>50@IVb{
zV-0)aWca`jawY|(3Z(1_UHX%n0&0$>LQ*CqBNyv|bGV;>h^v(XD58;c!LnI;2~@2@
zd1g+ILQ<+iYDIERX-aAeIH4yNr7DzUloliMYHEr?Ql)~fA!w+kG*uxrucRn571RM&
zfaLqs6a|C>6><|xiZUw{k=y~@ospaY+Lls`l(Rt1O#^+XxnKi9yB{FiwiJ>Ri&G({
zKpUA*UuWj$St&SGI9Gr+RA^+TDHyqED&!ZzgeqJVK!V0DnhHoQTLq2e#G+IyWV<sG
zi@{mHG%qE!C_O(fRROd*6TI|KAu+imvkW}mrl_f)k(XbRYNg<8YX}N%1*r3iQ`2)n
z`z%092^C5bi?bmG57e!&YKa_|Lh43cSZfmMIYUhUf`yR$i)d(r1K$b~J5~yQpvC>r
zA{=SYiBpBMt|9WK5_me$M2b(yNEWy+4=ykvB`avriyvfF3j>3LCNHE3!47Rh-(pKF
zO3zKK0JnI+O^~9e;KHyhGqoI2OoF$PfGXRfSD+FR)WN#Nl39|PtH}+SQ35Siy~PQx
z>cQ*Xcp>$>Gh}nkEq-tj2uk6QqU;u9(k%{9SqPd$y2S=!xrf|hOuoel-i`wnWd}3e
zL(mIG-t^QGgh^jPg(9eN#lR`W1giX@3n-bmKr>I^1^^F}45*-F6kq}snmkM*j3!J)
z9E=PMA<3X_9H<Ea!k`8|yafQ7#HwK`5&@NkC6LmP3A8>kg$cU#4m`-ckg=8lJYWoJ
z2s47VO@XEjL2ET?7#DzA2B38tjG%H6v>`8r5nRHtWU+xuI7SIj2Y?+kpzOjBdkH)R
z3EGXz04?FzYZz;g`5YzeDU3BN;1X_8EoTiUsL@}%u7(rT<S%+r!m@y)hP{R}n+Zgw
zFp4u|Gc9BSEzPUt0?ToL<v2hKt&rrn^CUp(v!N<$z^cHyK#R6OZ5Q@z=Av0C95q}u
z+#(F15><quh8slJaDhm125E4~3EFN5G8^Pa8-~JJg*o7+Hm4sr9cnUxsUpxiB1D=5
zH;9WsyMQ1m_7|w*&I4Ju3F_Dtfi|5M{RJgCa4Q#_3_)qL2$U9YapmNvXC^1+#OD_w
z(q+*<h#}y`exPI-#a9B}+5%gtR|MWV3fhEI#0VN_04?1qVg|8TKm;p@U;`2CAOc(=
zg9&g;R2(Jo1!v}^=cEQCW){gYF)$Q^26jMe6a<(!n79~0ZBOK+C%`DfD96OZ$ivJ7
zN`PF93=GNOAqG(5V_;waZ9@SqTLZU18Tc3&YM3Ul75}MWhD=v8f>I%9BOa3^18B-R
zhO?Hfmc5n()HST-uH~uat>pu)AOh{0f~(-K6{r=g6{;1k6{!`i6{{78s$hYukf@cc
zm8zAlm8q4jm8+GnRe-8sg{x4kRjO64RjF02RjXC6)u`2ks$hew(5lt0)v493)vML7
zHK;YL1x*@(T*?ktVO(ocYg%hoYhG(nYgubmYYp}lWKA&0f8dEtkXQ**3G)J$8nzU+
zg-p$iDO}<VP!V>B2xRI7+>v8m0F&c~$RVi~fQX1Qr0_sm+(>3iK;)2AOF~4%8B%yL
zR4YQ{kW?!{M8p|V_%Kv!LFABBYePiD8B+K$R2xI&kW`yMM8p|V1Q4oIz#AVxK1dN-
zzzPiyF3=h&h-#<^Cx!?ILS!L$`WI}5Fx(9AIx7@2gb+HQrV1iNkjxN)n<0achw6~V
z5RpQNAekWwH$xR64>dytLqr)Nf@Fpm+zdU0Jk$(b3=tiK2$C7%a5Kyi@=!C(Fhood
zB1mRPz*1z2BvdD8V-RGVS_+<qc_3je&X6Jv7vqJC$-u?<;9}fJsQ_lO2wWCnvM5{(
zVX_!p3}Lc3Tns5+z}+khmqnN?2Ny$_EDsk$n5+O7L&`&Nlhxp|2$R*}VhEEp;9>}q
zHQ{1N`3-KeK3o=IvH@HSVX`4y3}LbnTns61!cDe-%OXs+go`0ewt|ZxOtyxL!SXT7
z1h%3-HLNMJ;taJmprs%SIBIMbGEQJE{!{{5ieUpvK_$!!xN2-t<ir_L<i#0Em?67X
zB^f|zE{kU&<3iA;C4?FU6gA9}3|Q1CqNriPszwP#4J%eP$|!2su&Pl(QNxZ^jVf3T
zZw*@we~m<qVvSafaSd0EaE(ljYK>lvc^cH0HH;H@i~fKHM<Kq)Q{#b;MS#~d*lIkG
z3j8%5NCo~H52ONrjR#UesK!$R#WJ`Q9<BS(Yk2H+pE^9mM(aMNW=5nsXRy?L8t@o`
z)lj2#AE-?W3ysmbj|E(ogX_K&O|<%t2S@#<1+FJRr#67fRVJ);i8hKF?Dd}xiW=<o
zpDv0T?Dd}>iW=<opFWBj?Dd}kSPjAY54<qg(61^_84eU&QcF_7dtntc63bGH64O&b
z17yjW#hLke3fc;3;1Tv>P4KWF%6L~IXxKS188iy0k)M>6np~orlUkOV10FZhv{Hau
zp$zGUgIDFir^1TB^H-owdX<5TYly3Jh^N1wLXc~4s82|+g0HJ%aA=ULi-J?6f@7F#
zkfXb+LO_tKvuCiUzn@i=uYsN^Xa$*po+*?zgK*8DTyqH59LlwTa4n!*O9dNUg^-M*
z)Zz@#va~7=9R<*sxRpW`kAi}YuC4-P(`A(mgy)`<pPO1#nv<!Zk(rmAqo+_(npUD&
z#RZWKDNQS>;)bxpQZw_4tE3<t$D)$dqDq*aoYLIXJk2U06gjY9l_!4xyXgiy`v<uO
z>nH@dIy?IK=paHuLBrS6Pfx+rKvlsrKUCAI%EV0<9A8!n)wK#5L8-}!IXNJ<jzU0D
zDkM%pTulXtGob!-Ax#&K@Gv6H6mYm1lcom}zD}fR!Ws`&WSIepFRNfzcVAb(5J#Lz
z*5Ap?)j34h$2H8=N5RF@*;N4&g;rH;(1gaWqYzw@Vx>^UuA|_anFnPgR#+)iiRcHG
zCgzo7mL!&BmZd^>%Kc*0uY!*_C8z4;6;xL7r03-4g2pT%JW#5H^LWcZNfFK!NCYLu
z%+zAI2rt+#aIR>43alJ}+6XpO9+HVc2Ni^bIKs21jV?5ISNT#S?Sf|XauO>wt*Y!m
zwonkfocd0g`31$9sl`=7CD8G?<ka|(5J#|g1YjYC5P^jnTtpBaa&RFL<j{kQ3c^DW
zE+mN&iZCfnz9P`9-z`>f_5{z3L&m@%>&DXZi$GH*$G`*R1&JjYx7boEGK))!Z*k-%
zW~ZiP78T!O0qu~0Y|sK7xNwWLxFj(-Thlg5C^07oJS6T~mY7qTSdw2<93`1pP>@pz
zTICs%nVafZikPQ}5-upp%q#KF%c%?raV&!D|1JX0GYWug08bP^)4(lms66N#gDCz2
zSQ%367!XpL6eW;YmL3lm1c~s%dBq?uR}r`bg0P`A2}oQJq!Bb-6b}v#uuySHN<5lS
zZe|{aP+|p|kO0C;#{f{#6a_XNE(#U`Z9{<+f5l)vH`HMe4%B5}ju5KTAY!O)gNUIz
z4l0J?I<Odu^FU=Dn)ARyNX`TEVa|hapw0tx(3}SmLv<cR4Aps1F%;*4#X!ebKm*1x
z09?FAfvtcCA4Ci(>LCI=&;W$;piYGFgi(VKDv0Vxs358<VS*^mga``39aQWX04dR<
z!1ln!p@PWa4Jz|0qj;fCg(~2Mx)sV5L3J!l6xFpbQB>!`MN!-f6-5bHA5i6w7O)U8
zq=1D8zycP^gE|(%1A7>eN1%eJPK64hx)dge;!ubn=qw3P?na*v;{lfv&~m^QG!L-^
zG^4=C23m8>s04$c83{%`B*?|+f`Ck1j8O;(-dxAW$i<|>2!~8Y%xo~TKs4AqE+q5x
z81b73I*1U}#3B(!28NZ4xA;L5jq!=a@#UF$Df#7~gFo3pOGZnRLFbFIhJe<(gQt&H
zGT&k^0Ih>eEe0+4EJ@CQtiWO^E=eswnal%C?6JU(nFG!2fhO^47;BiCnS&XMPJ?EY
zL_yO#k{|*+y#pq|b37biD>Bn6mw*Bjys(0?NEUQx8&i=KBLf5YSewkMRFw5YpqU%c
zYz}z##sj?guZAHDw5*RYo2gjHi=m0J26XBiQ!R4|Qx<a#V-4s~2GDVzENFaIkiHm}
zS~ke+4rq2xgrSC6f}sYy|5KWwmJ77HL4=`(NrIt<ErnTvL7JhKyM(odtC_KeE1Rjf
z0c2YVYYlfZV-0sUQ}F~Sp9^#t7DT+38?@#@f}xhDhNFgC0<<iTt%M!4$F+nVv|AfI
z%UBb`RLfV(U&BztSHllFOSV>^hNXtDhF1h60wN(>*7$39L>OuyBqwMSTN-2;p8#~R
z9xL&BQkWrn3J-zSTu6gfc(eFHdLW=lY;Y>j<N&X1D*|s|M$8L>k8c3)fEEWuDrkmK
zQxF_sMRJS`3{lLP@fjeZ{1z*CRqZYIl+3cs;{2j0<~)#0UVQm2<^m9_Aig|`IUhn~
zL@^hGSjF)f;6w9nF=pLj1t$>ne8^Rr2Nusvt6UCBI4YoA$RxzX0X~Y8k6DCKgc)=w
zCkOa^1P&%CMjl2JMl+@&6_9g5{VV7U{SSMTqzqck1D}@$^~FjUK&!16GJ;m`fc7eb
z7HNVuBSKF}U|9&7S7yj!1%+Tb6KG2y=;)6cMo>ygVFB&PuVG4I5oW06NMTK30gXkA
zFw}5}Fi3+=7~m*j$l|PFt6`U50I7lK=4MD?gQx(__%37=VF1lPmoO~gtl?b9C=KQ<
z0G(C<O4p1FSwON2K<nhdVjzAAcNX|SGElltVNYR8;pk<n<pQO_EZ%s&6!sdfEZ%tj
z8m?dlO-?`X1hFP#(KAps1$Rb46T^^Gen8z)H%+!%+?jdtpd~7KsX4{Bc=Ah25IpYe
z)S|r997xf?23l`ZP<)FeDKoJcRC=U<RxN7^fY&*IRz84MxqyzlEJ#f(0Z%pGVk{{V
z0%dcSTu}3lsUY(fb81BaWK|ew`NJ)yyu4d%&iQ#|Mk%-0AnPBCz^8DX0Zlz~-{OEd
ztVjc74r_5{dT!z^wu*Sr)-Mp9UX+<~i>)#qL`Sh#Kp2n}XdGpUMVX*e6r=dl6HAMW
zGZXXTA^y3A5J<}et$9aJfYPA7DH(}*d8s+Ux%v5^MSCDe`XpAS7F`3SNF7j$WaeTN
zV&q^1oixS4D!|CXD8|V4pY1=>e-<`ASo-|W#tcfOMW7Q~Z*j!O=YqC>#cMJa#eqDC
zp8~Do(_{?MWQ62ZCdl-@DyaB$MOv^5n%=*~Ra}|`J)#S;(*)G4E;<5=c70ID@fIg%
z7F6oxrGPUm_!xv+T)BxQpu@#-GLyiYHg5?c1oR3jK~lHaAT;>Ik6QxZ(@^z5+dYc)
z!0Y=!6~iqFbYZ<>$SKuDprxR<I3Ut_c}1XI=TTB9F44<PO)M@gN(C(`DlP&Q`%w~T
z3i68zGV*is)1jwOfimzd<^WHhB2JL+o`4AO;SHd34ZsH;gU{#%pO*?g7_$g;dL-oJ
zBhblS;4__yK!*r{k5K{dTP^|}XHW#%@DAQp4c_z#-dqIUVFTWT0^Ya)Ua1aV@d{ol
z2wwV91X_Lou79I=inGDN3l5<oF;KX3f)0=@$<NOzE&`RMx7bQ53o`T4izGmj?0G5R
z9d?kDM?v)>qy&Q{mb}E=)cE*Ytl(w|QW{07U~O{qQ%ZAE?Lf73F=#U_Y~3!H1})<O
zt&?En0d00=;$g;f-V84XCkGD)ABPABKL;C^E{6~YKbJEH7nd5B5El;z7l$~94i_s2
g3kMqqABPSHKZholJVz2_nGg??9J7=@mogV40F{K{cK`qY

literal 0
HcmV?d00001

diff --git a/binary_fill_holes_benchmark.py b/binary_fill_holes_benchmark.py
new file mode 100644
index 0000000..3ee1649
--- /dev/null
+++ b/binary_fill_holes_benchmark.py
@@ -0,0 +1,35 @@
+from scipy import ndimage
+import numpy as np
+import cupy as cp
+from cupyx.scipy.ndimage import binary_fill_holes
+
+from timeit import default_timer as timer
+
+# Test image: a large filled square with a hole punched in the middle,
+# so binary_fill_holes actually has something to fill.
+a = np.zeros((5000, 5000), dtype=int)
+
+a[1000:4000, 1000:4000] = 1
+a[1200:3800, 1200:3800] = 0
+
+np_start = timer()
+
+b = ndimage.binary_fill_holes(a).astype(int)
+
+np_end = timer()
+print('np_time: ', np_end - np_start)
+
+print(b)
+
+with cp.cuda.Device(1):
+    c_a = cp.array(a)
+
+    cp_start = timer()
+
+    c_b = binary_fill_holes(c_a).astype(int)
+
+    cp_end = timer()
+
+    print('cp_time: ', cp_end - cp_start)
+
+    print(c_b)
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
index f2993dd..dc797de 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,4 +1,4 @@
-name: python37
+name: flash
 channels:
   - pytorch
   - defaults
diff --git a/extractFGpatches.py b/extractFGpatches.py
new file mode 100755
index 0000000..a19e23f
--- /dev/null
+++ b/extractFGpatches.py
@@ -0,0 +1,333 @@
+import numpy as np
+import os
+import sys
+import logging as log
+from PIL import Image
+from scipy.ndimage.measurements import label
+from skimage.color import rgb2gray
+import openslide as osl
+from openslide.lowlevel import OpenSlideError
+from skimage.transform import rescale, resize
+from scipy.ndimage import zoom
+import matplotlib.pyplot as plt
+from skimage.segmentation import flood, flood_fill
+from skimage import filters
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+from skimage.measure import regionprops
+from tifffile import imread
+import shutil
+import cv2
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionResultsWithoutDilation, saveImage, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class
+
+from joblib import Parallel, delayed
+import multiprocessing
+from ome_types import from_xml
+
+
+def writeOutPatchesRowwise(x):
+    for y in range(extractedPatches.shape[1]):
+        if extractedSegm[x, y].sum() > (minFGproportion * imageResolution * imageResolution):
+            Image.fromarray(extractedPatches[x, y, 0]).save(
+                resultsPath + '/' + WSIpath[:suffixCut] + '_' + str(x) + '_' + str(y) + '_' + str(i + 1) + '.png')
+    return
+
+
+stain = 'NGAL' #PAS, aSMA, Col III, NGAL, Fibronectin, Meca-32, CD44, F4-80, CD31, AFOG
+# WSIfolder = '/work/scratch/bouteldja/Data_ActivePAS'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney histology'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney IHC'
+WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'#'SPECIFIED WSI FOLDER'
+
+imageSizeUM = 216
+imageResolution = 640
+strideProportion = 1.0
+minFGproportion = 0.5
+
+
+# resultsPath = '/work/scratch/bouteldja/Data_StainTranslation/'+stain.replace(" ", "")
+resultsPath = WSIfolder
+resultsForegroundSegmentation = resultsPath + '/FGsegm'
+
+if os.path.exists(resultsForegroundSegmentation):
+    shutil.rmtree(resultsForegroundSegmentation)
+
+onlyPerformForegroundSegmentation = True
+
+saveWSICoarseForegroundSegmResults = True
+saveWSICoarseForegroundSegmResults_RegionSeparate = False
+saveWSICroppedForegroundSegmResults = False
+alpha = 0.3
+figHeight = 20
+
+targetSpacing = imageSizeUM / imageResolution
+shiftUM = imageSizeUM // 3
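+# With the defaults above this gives targetSpacing = 216 / 640 = 0.3375 um/px
+# and shiftUM = 72 um of extra context around each foreground bounding box.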
+
+struc3 = generate_ball(1)
+struc5 = generate_ball(2)
+struc7 = generate_ball(3)
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+if not os.path.exists(resultsForegroundSegmentation):
+    os.makedirs(resultsForegroundSegmentation)
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsForegroundSegmentation + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+print(os.listdir(WSIfolder))
+
+files = sorted(list(filter(lambda x: ('.ndpi' in x or '.svs' in x or '.tif' in x) and stain in x, os.listdir(WSIfolder))))
+files = ['p21_kidleftp456_B10_PAS_3.ome.tif']
+# files = sorted(list(filter(lambda x: '.ndpi' in x or '.svs' in x, os.listdir(WSIfolder))))
+logger.info('Amount of WSIs in folder: ' + str(len(files)))
+
+num_cores = multiprocessing.cpu_count()
+
+detectedRegions = []
+
+try:
+
+    for no, WSIpath in enumerate(files):
+        # Load slide
+        ## .ome.tiff
+        # imgRGB = imread(os.path.join(WSIfolder, WSIpath))
+        # spacings = np.array([0.2197, 0.2197])
+
+        # resize_x = 500 / imgRGB.shape[1]
+
+        # resize_y = 
+
+        # imgRGB = np.array()
+        slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+        if '.tif' in WSIpath:
+        # slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+            x_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_x
+            y_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_y
+            spacings = np.array([float( x_spacing), float(y_spacing)])
+            suffixCut = -8
+        else:
+        # # Extract/print relevant parameters
+            spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+            if WSIpath.split('.')[-1] == 'ndpi':
+                suffixCut = -5
+            else:
+                suffixCut = -4
+        levelDims = np.array(slide.level_dimensions)
+        amountLevels = len(levelDims)
+        levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+        logger.info(str(no + 1) + ':  WSI:\t' + WSIpath + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+
+        
+
+        # # specify used wsi level for foreground segmentation: min. 500x500 pixels
+        usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1) == True).max()
+        logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+        # extract image from that level
+        imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+        img = rgb2gray(imgRGB)
+
+        # foreground segmentation
+        otsu_threshold = filters.threshold_otsu(img)
+
+        divideSizeFactor = 30
+
+        if stain == 'PAS':
+            if '17.40.53' in WSIpath:
+                otsu_threshold -= 0.065
+
+        elif stain == 'aSMA':
+            if '22.15.27' in WSIpath or '22.28.25' in WSIpath:
+                otsu_threshold -= 0.07
+            if '20.19.57' in WSIpath or '16.59.43' in WSIpath:
+                otsu_threshold += 0.024
+
+        elif stain == 'CD31':
+            if '18.51.39' in WSIpath:
+                otsu_threshold += 0.02
+
+        elif stain == 'Col III':
+            if '21.25.37' in WSIpath or '21.42.09' in WSIpath:
+                otsu_threshold -= 0.1
+            elif '2172_13' in WSIpath:
+                otsu_threshold += 0.05
+            else:
+                otsu_threshold += 0.055
+
+        elif stain == 'NGAL':
+            if '21.40.59' in WSIpath or '21.35.56' in WSIpath:
+                otsu_threshold += 0.005
+            elif '18.04.45' in WSIpath:
+                otsu_threshold += 0.07
+            elif '21.46.20' in WSIpath:
+                otsu_threshold += 0.01
+            else:
+                otsu_threshold += 0.05
+
+        elif stain == 'Fibronectin':
+            if '23.03.22' in WSIpath:
+                otsu_threshold -= 0.08
+            elif '00.58.23' in WSIpath:
+                otsu_threshold += 0.02
+            else:
+                otsu_threshold += 0.05
+
+        elif stain == 'Meca-32':
+            divideSizeFactor = 50
+
+            if '1150-12' in WSIpath:
+                otsu_threshold -= 0.097
+            elif '22.36.35' in WSIpath:
+                otsu_threshold -= 0.065
+            elif '10.23.46' in WSIpath:
+                otsu_threshold += 0.05
+            else:
+                otsu_threshold += 0.02
+
+        elif stain == 'CD44':
+            if '11.22.14' in WSIpath or '11.28.21' in WSIpath:
+                otsu_threshold += 0.085
+            elif '11.41.12' in WSIpath:
+                otsu_threshold -= 0.06
+            else:
+                otsu_threshold += 0.015
+
+
+
+
+        img_mask = img < otsu_threshold
+        # img_mask = img < 0.78
+        logger.info('Utilized threshold: ' + str(otsu_threshold))
+
+        if stain == 'NGAL' and '18.58.25' in WSIpath:
+            img_mask[395:405,440:530] = 0
+
+        # extract connected regions only with at least 1/25 size of WSI
+        labeledRegions, numberRegions = label(img_mask, struc3)
+        minRequiredSize = (img_mask.shape[0] * img_mask.shape[1]) // divideSizeFactor
+
+        argRegions = []
+        for i in range(1, numberRegions + 1):
+            if (labeledRegions == i).sum() > minRequiredSize:
+                argRegions.append(i)
+
+        finalWSI_FG = np.zeros(img_mask.shape, dtype=bool)
+
+        # process these regions
+        for i, arg in enumerate(argRegions):
+            logger.info('Extract foreground region ' + str(i + 1) + '...')
+            detectedRegion = labeledRegions == arg
+            detectedRegion = binary_fill_holes(detectedRegion)
+            detectedRegion = binary_opening(detectedRegion, structure=struc7)
+
+            # extract biggest component
+            labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+            if numberLabeledRegions > 1:
+                argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+                detectedRegion = labeledRegions2 == argMax
+
+            # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+            # Save foreground segmentation (on chosen coarse resolution level) as overlay
+            if saveWSICoarseForegroundSegmResults_RegionSeparate:
+                saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg.png')
+
+            finalWSI_FG = np.logical_or(finalWSI_FG, detectedRegion)
+
+            logger.info('Foreground segmentation done on coarse level...')
+
+            if onlyPerformForegroundSegmentation:
+                continue
+
+            # enlargement of foreground in order to fully cover border structures
+            # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+            # compute bounding box
+            temp = np.where(detectedRegion == 1)
+
+            bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+            # compute how much to enlarge bbox to consider wider context utilization (especially for patchify)
+            downsampleFactor = int(levelDownsamples[usedLevel])
+            # downsampleFactor = int(resize_x)
+            shift = round((shiftUM / spacings[0]) / downsampleFactor)
+
+            # enlarge bounding box due to wider context consideration
+            bbox[0] = max(bbox[0] - shift, 0)
+            bbox[1] = max(bbox[1] - shift, 0)
+            bbox[2] = min(bbox[2] + shift, detectedRegion.shape[0] - 1)
+            bbox[3] = min(bbox[3] + shift, detectedRegion.shape[1] - 1)
+
+
+            bbox_WSI = np.asarray(bbox * downsampleFactor, int)
+            logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+            logger.info('Extract high res patch and segm map...')
+            try:
+                img_WSI = np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1] + downsampleFactor, bbox_WSI[2] - bbox_WSI[0] + downsampleFactor])))[:, :, :3]
+            except OpenSlideError:
+                logger.info('############ FILE CORRUPTED - IGNORED ############')
+                continue
+
+            segMap = zoom(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0)
+            # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+            assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale lead to unequal resolutions..."
+            logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+            downsamplingFactor = spacings[0] / targetSpacing  # Rescaling very slow!
+            logger.info('Spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+            img_WSI = np.asarray(np.round(rescale(img_WSI, downsamplingFactor, order=1, preserve_range=True, multichannel=True)), np.uint8)
+            segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), bool)
+            # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+            assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale lead to unequal resolutions..."
+            logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+            # Save cropped foreground segmentation result as overlay
+            if saveWSICroppedForegroundSegmResults:
+                saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg2.png')
+
+
+            ##### FOREGROUND SEGMENTATION DONE - NOW PATCH EXTRACTION USING PATCHIFY #####
+            logger.info('Perform patch extraction...')
+
+            extractedPatches = patchify(img_WSI.copy(), patch_size=(imageResolution, imageResolution, 3), step=int(imageResolution*strideProportion))  # shape: (5, 7, 1, 640, 640, 3)
+            extractedSegm = patchify(segMap.copy(), patch_size=(imageResolution, imageResolution), step=int(imageResolution*strideProportion))  # shape: (5, 7, 640, 640)
+
+            resultsLabel2 = Parallel(n_jobs=num_cores)(delayed(writeOutPatchesRowwise)(x) for x in range(extractedPatches.shape[0]))
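+            # One worker per patch row: each call writes out all patches of row x whose
+            # foreground fraction exceeds minFGproportion (see writeOutPatchesRowwise above).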
+
+            # for x in range(extractedPatches.shape[0]):
+            #     for y in range(extractedPatches.shape[1]):
+            #         if extractedSegm[x,y].sum() > (minFGproportion * imageResolution * imageResolution):
+            #             Image.fromarray(extractedPatches[x,y,0]).save(resultsPath + '/' + WSIpath[:suffixCut] +'_'+str(x)+'_'+str(y)+'_'+str(i + 1)+'.png')
+
+            logger.info('Done.')
+
+        if saveWSICoarseForegroundSegmResults:
+            saveOverlayResults(imgRGB, finalWSI_FG, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] + '_fgSeg_WSI.png')
+
+        detectedRegions.append(len(argRegions))
+        logger.info('####################')
+
+    logger.info('Detected regions of all processed slides:')
+    logger.info(detectedRegions)
+
+
+
+except:
+    logger.exception('! Exception !')
+    raise
+
+log.info('%%%% Ended regularly ! %%%%')
+
diff --git a/model.py b/model.py
index 0688d44..5bf7d0f 100644
--- a/model.py
+++ b/model.py
@@ -32,8 +32,9 @@ nonlinearity = partial(F.relu, inplace=True)
 ####################################################################################################
 # Custom represents our utilized and developed deep learning model. It is based on the U-Net architecture:
 # ----- Custom Unet 2D/3D - Pooling-Encoder + (Transposed/Upsampling)-Decoder + DoubleConvs ----- #
+# modelDim = input image dimension (2D or 3D image)
 class Custom(nn.Module):
-    def __init__(self, input_ch=3, output_ch=1, modelDim=2):
+    def __init__(self, input_ch=3, output_ch=1, modelDim=2): 
         super(Custom, self).__init__()
         assert modelDim == 2 or modelDim == 3, "Wrong unet-model dimension: " + str(modelDim)
 
@@ -391,3 +392,10 @@ class outconv(nn.Module):
 
 
 
+if __name__ == '__main__':
+
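+    # Quick shape sanity check: push one random 640x640 RGB patch through
+    # the 2D U-Net configured for 8 output classes.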
+    x = torch.rand((1, 3, 640, 640))
+    print(x.shape)
+    model = Custom(input_ch=3, output_ch=8, modelDim=2)
+    output = model(x)
+    print(output.shape)
\ No newline at end of file
diff --git a/segment_WSI.py b/segment_WSI.py
index b27507e..5504bbe 100644
--- a/segment_WSI.py
+++ b/segment_WSI.py
@@ -30,12 +30,17 @@ from skimage.morphology import remove_small_objects
 from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
 from model import Custom
 
+from pathlib import Path
 
 
 
-WSIrootFolder = 'SPECIFIED WSI FOLDER'
-modelpath = 'STRUCTURE SEGMENTATION MODEL PATH'
-resultsPath = 'RESULTS PATH'
+# WSIrootFolder = '/homeStor1/datasets/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/Saskia_3D/debug/'#'SPECIFIED WSI FOLDER'
+
+# modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+modelpath = '/homeStor1/nbouteldja/Project_Human_Bios/Results/custom_train_val_test_e500_b10_r0.001_w1e-05_1024_1024_32_RAdam_instance_1deeper_+-1range_noArterialWeights_miceSpacing_InclJASNhumandata_weight10_3/Model/finalModel.pt'
+resultsPath = WSIrootFolder#'RESULTS PATH'
+
 
 if not os.path.exists(resultsPath):
     os.makedirs(resultsPath)
@@ -52,7 +57,7 @@ regionMinSizeUM = 3E5
 
 alpha = 0.3
 strideProportion = 0.5
-figHeight = 15
+figHeight = 26
 minibatchSize = 2
 minibatchSize_FG = 1
 useAllGPUs = False
diff --git a/segment_WSI_clean.py b/segment_WSI_clean.py
new file mode 100644
index 0000000..7c16888
--- /dev/null
+++ b/segment_WSI_clean.py
@@ -0,0 +1,474 @@
+import numpy as np
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+
+from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from PIL import Image
+import matplotlib.pyplot as plt
+from scipy.ndimage.measurements import label
+from scipy.ndimage import zoom
+from scipy.ndimage.morphology import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+import scipy as sp
+from skimage.transform import rescale, resize
+from skimage.measure import regionprops
+from skimage.segmentation import flood, flood_fill
+from skimage.color import rgb2gray
+from skimage.segmentation import clear_border
+from skimage import filters
+from skimage.morphology import remove_small_objects
+
+from utils_Clean import savePredictionOverlayResults_Fast, getFGsegmParam, generate_ball, patchify, unpatchify, saveOverlayResults, save_WSI_image, savePredictionOverlayResults, savePredictionResultsWithoutDilation, saveImage, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class
+from model import Custom, CustomContext3, CustomContext, UNetVanilla, CE_Net_2D, CE_Net_Inception_Variants_2D, StackedUNet
+from ome_types import from_xml
+
+
+
+
+# WSIrootFolder = '/homeStor1/nbouteldja/Bertram_McCullen/WSIs'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'#'SPECIFIED WSI FOLDER'
+
+
+
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b30_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_+-1range_CycleData/Model/finalModel.pt'
+
+# resultsPath = '/homeStor1/nbouteldja/Bertram_McCullen/Segmentations'
+resultsPath = WSIrootFolder
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 26
+minibatchSize = 52
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+makeBGwhite = True
+
+saveWSICoarseForegroundSegmResults = True
+saveCroppedForegroundSegmResults = False
+saveMedullaCortexBGSegmResults = True
+saveWSIandPredNumpy = True
+
+
+
+TUBULI_MIN_SIZE = 500
+GLOM_MIN_SIZE = 1800
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 35
+
+# parameter for FGsegm
+regionMinSizeUM = 1E7
+
+labelBG = 8
+
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2  # + 2 as a small safety margin
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
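+# With the defaults above: targetSpacing = 174 / 516 ~= 0.337 um/px,
+# segmentationPatchStride = 258 px, shiftMinUM ~= 22.9 um and shiftMaxUM ~= 107.9 um.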
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+try:
+    for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+        fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.tif'))])
+
+        if len(fileListWSI) != 0:
+            logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+            resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+            resultsDirNPYfiles = resultsDir + '/npyFiles'
+            if not os.path.exists(resultsDirNPYfiles):
+                os.makedirs(resultsDirNPYfiles)
+
+            for no, fname in enumerate(fileListWSI):
+
+                # Load slide
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                if fname.endswith('.tif'):
+                    x_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_x
+                    y_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_y
+                    spacings = np.array([float(x_spacing), float(y_spacing)])
+                else:
+                    # Extract/print relevant parameters
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                levelDims = np.array(slide.level_dimensions)
+                amountLevels = len(levelDims)
+                levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+                logger.info(str(no + 1) + ':  WSI:\t' + fname + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+                logger.info('Spacings: ' + str(spacings))
+                logger.info('Level Dimensions: ' + str(levelDims))
+                logger.info('Level Downsamples: '+str(levelDownsamples))
+
+                suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+
+                # specify used wsi level for foreground segmentation: min. 500x500 pixels
+                usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1) == True).max()
+                logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+                opening_radius, moreThresh = getFGsegmParam(fname, dirName)
+                logger.info('Initial opening_radius: {}, moreThresh: {}'.format(opening_radius, moreThresh))
+
+                # extract image from that level
+                imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+                img = rgb2gray(imgRGB)
+
+                # foreground segmentation
+                otsu_threshold = filters.threshold_otsu(img)
+                img_mask = img < otsu_threshold + moreThresh
+                logger.info('Utilized threshold: ' + str(otsu_threshold + moreThresh))
+
+
+                regionMinPixels = regionMinSizeUM / (spacings[0] * spacings[1] * levelDownsamples[usedLevel] * levelDownsamples[usedLevel])
+
+                labeledRegions, _ = label(img_mask)
+                labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+
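+                # If thresholding found no sufficiently large region, relax the threshold
+                # step by step; if it marked (almost) the whole image as foreground, tighten it.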
+                while numberFGRegions == 0:
+                    otsu_threshold += 0.02
+                    img_mask = img < otsu_threshold + moreThresh
+                    print('No region detected, utilized threshold now: ' + str(otsu_threshold + moreThresh))
+                    labeledRegions, _ = label(img_mask)
+                    labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+                while (labeledRegions==1).sum() > 0.98 * img.shape[0] * img.shape[1]:
+                    otsu_threshold -= 0.04
+                    img_mask = img < otsu_threshold + moreThresh
+                    print('Whole image region detected, utilized threshold now: ' + str(otsu_threshold + moreThresh))
+                    labeledRegions, _ = label(img_mask)
+                    labeledRegions, numberFGRegions = label(remove_small_objects(labeledRegions, min_size=regionMinPixels))
+
+                logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+                # process these regions
+                for regionID in range(1, numberFGRegions+1):
+                    logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                    detectedRegion = labeledRegions == regionID
+
+                    detectedRegion = binary_fill_holes(detectedRegion)
+                    detectedRegion = binary_opening(detectedRegion, structure=generate_ball(opening_radius))
+
+                    # extract biggest component
+                    labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+                    if numberLabeledRegions > 1:
+                        argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+                        detectedRegion = labeledRegions2 == argMax
+                        if detectedRegion.sum() < regionMinPixels:
+                            logger.info('Region has gotten too small after opening!')
+                            continue
+                    elif numberLabeledRegions == 0:
+                        logger.info('Region vanished after opening!')
+                        continue
+
+                    logger.info('Foreground segmentation done on coarse level...')
+
+                    # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                    temp = np.where(detectedRegion == 1)
+                    bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                    downsampleFactor = int(levelDownsamples[usedLevel])
+                    shiftMin = round((shiftMinUM / spacings[0]) / downsampleFactor)
+                    shiftMax = round((shiftMaxUM / spacings[0]) / downsampleFactor)
+
+                    # enlarge bounding box due to wider context consideration
+                    bbox[0] = max(bbox[0] - shiftMin, 0)
+                    bbox[1] = max(bbox[1] - shiftMin, 0)
+                    bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                    bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+
+                    bbox_WSI = np.asarray(bbox * downsampleFactor, int)
+                    logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+                    # Save foreground segmentation (on chosen coarse resolution level) as overlay
+                    if saveWSICoarseForegroundSegmResults:
+                        logger.info('Saving coarse foreground segmentation results...')
+                        saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_({}_{}_{}).png'.format(bbox_WSI[0], bbox_WSI[1], spacings[0]))
+
+
+                    logger.info('Extract high res patch and segm map...')
+                    try:
+                        img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1], bbox_WSI[2] - bbox_WSI[0]])))[:, :, :3], np.uint8)
+                    except OpenSlideError:
+                        logger.info('############ FILE CORRUPTED - IGNORED ############')
+                        continue
+
+                    segMap = zoom(detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]], downsampleFactor, order=0)
+                    # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+                    assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling led to unequal resolutions..."
+                    logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+                    downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                    logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                    segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), bool)
+                    img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                    # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                    assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                    logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                    # Save cropped foreground segmentation result as overlay
+                    if saveCroppedForegroundSegmResults:
+                        logger.info('Saving initial cropped foreground segmentation results...')
+                        # saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_tissue.png')
+                        saveOverlayResults(img_WSI, segMap, alpha=0.1, figHeight=figHeight, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_fgSeg_tissue.png')
+
+                    ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                    logger.info('Start segmentation process...')
+
+                    # preprocess img
+                    img_WSI_prep = (np.array((img_WSI / 255. - 0.5) / 0.5, np.float32)).transpose(2, 0, 1)
+
+                    # patchify
+                    smallOverlappingPatches = patchify(img_WSI_prep.copy(), patch_size=(3, patchImgSize, patchImgSize), step=segmentationPatchStride)  # shape: (1, 5, 7, 3, 640, 640)
+
+                    # smallOverlappingPatches = torch.from_numpy(smallOverlappingPatches).to(device)
+                    smallOverlappingPatches = torch.from_numpy(smallOverlappingPatches)
+
+                    # calculate the extent of the stitched segmentation output; patchify cuts off the last patch if it does not fit the window exactly
+                    startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+                    endX = segmentationPatchStride * (smallOverlappingPatches.size(1)-1) + patchSegmSize + startX
+                    endY = segmentationPatchStride * (smallOverlappingPatches.size(2)-1) + patchSegmSize + startY
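+                    # the stitched output only covers the 516x516 centers of the 640x640 patches: a margin of
+                    # (patchImgSize - patchSegmSize) // 2 = 62 px is cropped per side, and endX/endY follow from
+                    # stride * (number of patches - 1) + patchSegmSize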
+
+                    bigPatchResults = torch.zeros(device="cpu", size=(ftChannelsOutput, endX - startX, endY - startY))
+
+                    amountOfRowPatches = smallOverlappingPatches.size(2)
+                    gpuAmountRowPatchSplits = math.ceil(amountOfRowPatches / minibatchSize)
+                    gpuIDXsplits = np.array_split(np.arange(amountOfRowPatches), gpuAmountRowPatchSplits)
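+                    # each row of overlapping patches is split into chunks of at most 'minibatchSize' patches so the GPU memory footprint stays bounded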
+
+                    for x in range(smallOverlappingPatches.size(1)):
+                        for k in range(gpuAmountRowPatchSplits):
+                            imgBatch = smallOverlappingPatches[0, x, gpuIDXsplits[k], :, :, :].to(device)
+
+                            with torch.no_grad():
+                                rowPrediction = torch.softmax(model(imgBatch), dim=1)  # shape: (7, 8, 516, 516)
+
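+                                # test-time augmentation: softmax outputs of the original and three flipped variants
+                                # (flip(2), flip(2)+flip(3), flip(3)) are summed after undoing each flip on the
+                                # prediction; the later argmax over the summed maps acts like averaging the four runs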
+                                if applyTestTimeAugmentation:
+                                    imgBatch = imgBatch.flip(2)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                                    imgBatch = imgBatch.flip(3)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                                    imgBatch = imgBatch.flip(2)
+                                    rowPrediction += torch.softmax(model(imgBatch), 1).flip(3)
+
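+                                # optional: up-weight the central half of each patch so that, where patches overlap,
+                                # the argmax favours predictions made near a patch center over those near its border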
+                                if centerWeighting:
+                                    rowPrediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                                rowPrediction = rowPrediction.to("cpu")
+
+                                for idx, y in enumerate(gpuIDXsplits[k]):
+                                    bigPatchResults[:, segmentationPatchStride * x:patchSegmSize + segmentationPatchStride * x,segmentationPatchStride * y:patchSegmSize + segmentationPatchStride * y] += rowPrediction[idx, :, :, :]
+
+                    bigPatchResults = torch.argmax(bigPatchResults, 0).byte().numpy() # shape: (1536, 2048)
+
+                    logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+                    # Context margin + border patches not fully inside img removed
+                    img_WSI = img_WSI[startX:endX, startY:endY, :]
+                    segMap = segMap[startX:endX, startY:endY]
+
+                    # correct foreground segmentation including all touching vein prediction instances
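+                    # idea: mark background as vein (class 4); every class-4 component still touching the image
+                    # border is then treated as background (clear_border removes it, the XOR recovers it as bgMap),
+                    # so vein predictions merging into the surrounding background are absorbed before hole filling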
+                    bgMap = np.logical_not(segMap)
+                    bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                    temp = bigPatchResults == 4
+                    bgMap = np.logical_xor(clear_border(temp), temp)
+                    segMap = np.logical_not(bgMap)
+                    segMap = binary_fill_holes(segMap)
+                    bgMap = np.logical_not(segMap)
+
+                    # extract biggest fg component
+                    temp, numberLabeledRegions = label(segMap)
+                    if numberLabeledRegions > 1:
+                        argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                        segMap = temp == argMax
+                        bgMap = np.logical_not(segMap)
+
+                    bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+                    if makeBGwhite:
+                        img_WSI[bgMap] = 255
+
+                    logger.info('Saving prediction and background overlay results...')
+                    save_WSI_image(img_WSI, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_imageWSI.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+                    savePredictionOverlayResults(img_WSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy img...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_resultWSI.npy', img_WSI)
+
+                    logger.info('Start postprocessing...')
+
+                    # remove border class
+                    bigPatchResults[bigPatchResults == 7] = 0
+
+                    # Delete BG to reduce postprocessing overhead
+                    bigPatchResults[bgMap] = 0
+
+                    ################# HOLE FILLING ################
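+                    # nested classes are filled outside-in: the tuft (3) mask is saved, the glomerulus (2 or 3)
+                    # union is hole-filled and written as 2, then the tuft is restored; arteries (5/6) are handled
+                    # the same way so lumina (6) survive the hole filling of the full artery (5)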
+                    bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+                    bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+                    temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+                    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+                    bigPatchResults[temp] = 3  # tuft
+                    temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+                    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+                    bigPatchResults[temp] = 6  # artery_lumen
+
+                    ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+                    temp, _ = label(bigPatchResults == 1)
+                    finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+                    ############ PERFORM TUBULE DILATION ############
+                    finalResults_Instance, numberTubuli = label(finalResults_Instance)
+                    assert numberTubuli < 65500, 'ERROR: Too many tubuli detected - uint16 supports at most 2^16 ~ 65k instance IDs!'
+                    finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+                    # finalResults_Instance = np.asarray(finalResults_Instance, np.uint16)
+                    # finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(1), np.uint8), iterations=1) #RESULT TYPE: UINT16
+                    finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
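+                    # tubule instances are offset to start at tubuliInstanceID_StartsWith (10) so the IDs 2-6 stay
+                    # free for the semantic classes assigned below; dilation runs on uint16, hence the ~65k limit
+                    # asserted above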
+
+                    temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+                    finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+                    temp, _ = label(bigPatchResults == 3)
+                    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+                    temp, _ = label(bigPatchResults == 4)
+                    finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+                    temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+                    finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+                    temp, _ = label(bigPatchResults == 6)
+                    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+                    finalResults_Instance = finalResults_Instance * segMap
+
+                    logger.info('Done - Save final instance overlay results...')
+                    savePredictionOverlayResults(img_WSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    logger.info('Done - Save final non-instance overlay results...')
+                    finalResults = finalResults_Instance.copy()
+                    finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+                    savePredictionOverlayResults(img_WSI, finalResults, resultsDir + '/' + fname[:suffixCut] +'_'+str(regionID)+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+                    finalResults_Instance[bgMap] = labelBG
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy final instance prediction results...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_finalInstancePrediction.npy', finalResults_Instance)
+
+                    ################################################################################################################
+
+                    logger.info('Medulla/Cortex-Segmentation starts...')
+
+                    downscaleFactor = 25.
+                    MEDULLA_TRESH_UP = 1200000
+                    MEDULLA_TRESH_DOWN = MEDULLA_TRESH_UP // int(downscaleFactor * downscaleFactor)
+                    cortexRad = 75
+                    medullaRad = 58
+                    smoothMed = 35
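+                    # heuristic on a 25x-downscaled map: tissue within 'cortexRad' (downscaled px) of a glomerulus
+                    # (or of background) counts as cortex, the rest is a medulla candidate; candidates below the
+                    # MEDULLA_TRESH thresholds are discarded, the remainder is dilated, Gaussian-smoothed,
+                    # upsampled back to working resolution and restricted to the tissue mask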
+
+                    finalResults_down = np.asarray(zoom(finalResults, 1 / downscaleFactor, order=0), np.uint8)
+                    bgMap_down = np.asarray(zoom(bgMap, 1 / downscaleFactor, order=0), bool)
+
+                    finalResults_down = np.logical_or(finalResults_down == 2, finalResults_down == 3)
+                    finalResults_down = binary_dilation(finalResults_down, structure=generate_ball(cortexRad))
+                    finalResults_down = np.logical_or(finalResults_down, bgMap_down)
+
+                    medulla = np.logical_not(finalResults_down)
+
+                    temp, numberRegions = label(medulla, generate_ball(1))
+                    if numberRegions > 1:
+                        for i in range(1, numberRegions + 1):
+                            medullaMask = temp == i
+                            if medullaMask.sum() < MEDULLA_TRESH_DOWN:
+                                temp[medullaMask] = 0
+                        medulla = temp > 0
+                        # argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                        # medulla = temp == argMax
+
+                    medulla = binary_dilation(medulla, structure=generate_ball(medullaRad))
+
+                    # smooth medulla on coarse level
+                    medulla = sp.ndimage.filters.gaussian_filter(np.asarray(medulla, np.float32), smoothMed, mode='nearest') > 0.5
+
+                    medulla = np.asarray(cv2.resize(np.asarray(medulla, np.uint8), dsize=(img_WSI.shape[1], img_WSI.shape[0]), interpolation=cv2.INTER_NEAREST), bool)
+
+                    medulla = np.logical_and(medulla, segMap)
+
+                    temp, numberRegions = label(medulla, generate_ball(1))
+                    if numberRegions > 1:
+                        for i in range(1, numberRegions + 1):
+                            medullaMask = temp == i
+                            if medullaMask.sum() < MEDULLA_TRESH_UP:
+                                temp[medullaMask] = 0
+                        medulla = temp > 0
+                    #     argMax = np.array([region.area for region in regionprops(temp)]).argmax() + 1
+                    #     medulla = temp == argMax
+
+                    kortex = np.asarray(np.logical_and(np.logical_not(medulla), segMap), np.uint8)
+                    kortex[medulla] = 2
+
+                    if saveWSIandPredNumpy:
+                        logger.info('Saving numpy kortex/medulla...')
+                        np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] + '_'+str(regionID)+'_resultKortexMedulla.npy', kortex)
+
+                    if saveMedullaCortexBGSegmResults:
+                        logger.info('Saving kortex/medulla/bg segmentation results...')
+                        kortex[kortex == 0] = labelBG
+                        savePredictionOverlayResults_Fast(img_WSI, kortex, resultsDir + '/' + fname[:suffixCut] + '_' + str(regionID) + '_medullaKortexBG.png', figSize=(bigPatchResults.shape[1] / bigPatchResults.shape[0] * figHeight, figHeight), alpha=alpha)
+
+                logger.info('####################')
+
+except:
+    logger.exception('! Exception !')
+    raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_cupy.py b/segment_WSI_cupy.py
new file mode 100644
index 0000000..231059e
--- /dev/null
+++ b/segment_WSI_cupy.py
@@ -0,0 +1,541 @@
+# this file recursively performs the automated segmentation of WSIs by applying the tissue segmentation and structure segmentation CNN to all WSIs contained in a specified folder
+
+import numpy as np
+import cupy as cp
+import cupyx.scipy as sp
+from cupyx.scipy.ndimage import label
+from cupyx.scipy.ndimage import zoom
+from cupyx.scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+from cucim.skimage.morphology import remove_small_objects
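+# note: the cupyx.scipy.ndimage / cucim functions imported above operate on CuPy (GPU) arrays; NumPy arrays or
+# torch tensors used further down would need to be converted first (e.g. via cp.asarray) before these calls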
+
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+from tqdm import tqdm, trange
+import re
+
+# from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from ome_types import from_xml
+from PIL import Image
+import matplotlib.pyplot as plt
+# from scipy.ndimage import label
+# from scipy.ndimage import zoom
+# from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+# import scipy.ndimage
+# import scipy as sp
+# from skimage.transform import rescale, resize
+# from skimage.measure import regionprops
+# from skimage.segmentation import flood, flood_fill
+# from skimage.color import rgb2gray
+# from skimage.segmentation import clear_border
+# from skimage import filters
+# from skimage.morphology import remove_small_objects
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
+from model import Custom
+
+from pathlib import Path
+
+import nnunetv2
+import torch
+from batchgenerators.dataloading.data_loader import DataLoader
+from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
+from batchgenerators.transforms.utility_transforms import NumpyToTensor
+from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
+    save_json
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.inference.export_prediction import export_prediction_from_softmax
+from nnunetv2.inference.sliding_window_prediction import predict_sliding_window_return_logits, compute_gaussian
+from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+from nnunetv2.utilities.file_path_utilities import get_output_folder, should_i_save_to_file, check_workers_busy
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.json_export import recursive_fix_for_json_export
+from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels, convert_labelmap_to_one_hot
+from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
+
+from tifffile import imread
+# from aicsimageio import AICSImage
+from tqdm import tqdm
+import gc
+
+
+def load_what_we_need(model_training_output_dir, use_folds, checkpoint_name):
+    # we could also load plans and dataset_json from the init arguments in the checkpoint. Not quite sure what is the
+    # best method so we leave things as they are for the moment.
+    dataset_json = load_json(join(model_training_output_dir, 'dataset.json'))
+    plans = load_json(join(model_training_output_dir, 'plans.json'))
+    plans_manager = PlansManager(plans)
+
+    if isinstance(use_folds, str):
+        use_folds = [use_folds]
+
+    parameters = []
+    for i, f in enumerate(use_folds):
+        f = int(f) if f != 'all' else f
+        checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name),
+                                map_location=torch.device('cpu'))
+        if i == 0:
+            trainer_name = checkpoint['trainer_name']
+            configuration_name = checkpoint['init_args']['configuration']
+            inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \
+                'inference_allowed_mirroring_axes' in checkpoint.keys() else None
+
+        parameters.append(checkpoint['network_weights'])
+
+    configuration_manager = plans_manager.get_configuration(configuration_name)
+    # restore network
+    num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json)
+    trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"),
+                                                trainer_name, 'nnunetv2.training.nnUNetTrainer')
+    network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager,
+                                                    num_input_channels, enable_deep_supervision=False)
+    return parameters, configuration_manager, inference_allowed_mirroring_axes, plans_manager, dataset_json, network, trainer_name
+
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/p21_kidrightp789_B10_PAS'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/Cooperations/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/ometif/'#'SPECIFIED WSI FOLDER'
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+resultsPath = WSIrootFolder#'RESULTS PATH'
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+# model_FG_path = 'TISSUE SEGMENTATION MODEL PATH'
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+patchImgSize_FG = 512
+patchLengthUM_FG = 1500 #2500
+regionMinSizeUM = 3E5
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 15
+minibatchSize = 2
+minibatchSize_FG = 1
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+saveWSICoarseForegroundSegmResults = True
+# saveCroppedForegroundSegmResults = False
+saveCroppedWSIimg = True
+saveWSIandPredNumpy = True
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+labelBG = 8
+
+# LOAD STRUCTURE SEGMENTATION MODEL
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+# LOAD TISSUE SEGMENTATION MODEL
+use_folds = [0]
+model_training_output_dir = f'/homeStor1/ylan/data/nnUNet/nnUNet_results/Dataset555_TissueDetection/nnUNetTrainer__nnUNetPlans__2d'
+checkpoint_name = 'checkpoint_best.pth'
+
+parameters, configuration_manager, inference_allowed_mirroring_axes, \
+        plans_manager, dataset_json, network, trainer_name = load_what_we_need(model_training_output_dir, use_folds, checkpoint_name)
+# model = network
+model_FG = network
+network.load_state_dict(parameters[0])
+
+
+# model_FG = torch.load(model_FG_path)
+model_FG = model_FG.to(device)
+model_FG.eval()
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+targetSpacing_FG = patchLengthUM_FG / patchImgSize_FG
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2 # + 2 just for sufficient margin reasons
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
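+# e.g. targetSpacing = 174 / 516 ~ 0.337 um/px, so shiftMinUM ~ 62 * 0.337 + 2 ~ 22.9 um and
+# shiftMaxUM ~ (62 + 258) * 0.337 ~ 107.9 um of extra context when enlarging a detected region's bounding box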
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+# walk through the directory and its subdirectories recursively
+for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+    # print(fileList)
+    # filter WSIs in current directory
+    fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.scn') or fname.endswith('.tif') or fname.endswith('.ome.tif')) and 'PAS' in fname])
+    print(fileListWSI)
+    if len(fileListWSI) != 0:
+        logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+        resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+        resultsDirNPYfiles = resultsDir + '/npyFiles'
+        if not os.path.exists(resultsDirNPYfiles):
+            os.makedirs(resultsDirNPYfiles)
+
+        # traverse through all found WSIs
+        for no, fname in tqdm(enumerate(fileListWSI)):
+
+            # Extract/print relevant parameters
+                # try:
+                # except:
+                #     logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+
+            # img_WSI = imread(os.path.join(dirName,fname))
+            # spacings = np.array([0.2197, 0.2197])
+            # spacings = 
+            try:
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                print(slide.properties)
+                logger.info(str(no + 1) + ':  WSI:\t' + fname)
+                if 'openslide.mpp-x' in slide.properties.keys():
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                else: 
+                    x_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_x
+                    y_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_y
+                    spacings = np.array([float(x_spacing), float(y_spacing)])
+            except:
+                logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+                continue
+            levelDims = np.array(slide.level_dimensions)
+            amountLevels = len(levelDims)
+            levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+            logger.info('Spacings: ' + str(spacings))
+            logger.info('Level Dimensions: ' + str(levelDims))
+            logger.info('Level Downsamples: '+str(levelDownsamples))
+
+            suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+            if fname.split('.')[-1] == 'tif':
+                suffixCut = -8
+
+
+
+            # read the full-resolution WSI (level 0) and resample it to the target spacing of the structure segmentation network (level selection for efficiency is currently disabled, see commented code below)
+            spacingFactorX = spacings[0] / targetSpacing
+            spacingFactorY = spacings[1] / targetSpacing
+            # x_scaled = round(levelDims[0][0] * spacingFactorX)
+            # y_scaled = round(levelDims[0][1] * spacingFactorY)
+
+            # usedLevel = np.argwhere(np.all(levelDims > [x_scaled, y_scaled], 1) == True).max()
+
+            # # resample to target spacing
+            # logger.info('Image size resampled to FG spacing would be {}, {}, thus level {} with resolution {} chosen as resampling point!'.format(x_scaled, y_scaled, usedLevel, levelDims[usedLevel]))
+            # spacingOnUsedLevelX = spacings[0] * levelDownsamples[usedLevel]
+            # spacingOnUsedLevelY = spacings[1] * levelDownsamples[usedLevel]
+            # downsamplingFactorX = spacingOnUsedLevelX / targetSpacing_FG
+            # downsamplingFactorY = spacingOnUsedLevelY / targetSpacing_FG
+            np_slide = np.array(slide.read_region(location=np.array([0, 0]), level=0, size=levelDims[0]))[:, :, :3]
+
+            # d1 = int(round(imgWSI.shape[1] * downsamplingFactorX))
+            # d2 = int(round(imgWSI.shape[0] * downsamplingFactorY))
+
+            # img_shape = slide.pages[0].shape
+            # print(spacingFactorX, spacingFactorY)
+            # print(targetSpacing_FG)
+            # print(slide.shape)
+            # print(slide.shape[0])
+            d1 = int(round(np_slide.shape[1] * spacingFactorX)) 
+            d2 = int(round(np_slide.shape[0] * spacingFactorY))
+            # # print(d1, d2)
+
+            imgWSI = cv2.resize(np_slide, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR) #dtype: uint8, size: d2 x d1 x 3
+            del np_slide
+            gc.collect()
+
+            print(imgWSI.shape)
+            # imgWSIzeropadded = np.zeros(shape=(d2+patchImgSize_FG-1, d1+patchImgSize_FG-1, 3), dtype=np.float32)
+            # imgWSIzeropadded[:d2,:d1,:] = imgWSI
+
+            # tesselate resampled image
+            # smallOverlappingPatches = patchify(imgWSIzeropadded, patch_size=(patchImgSize_FG, patchImgSize_FG, 3), step=patchImgSize_FG) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+
+            # tileDataset = []
+            # # with tqdm(total=smallOverlappingPatches.shape[0] * smallOverlappingPatches.shape[1]) as pbar:
+            # for i in range(smallOverlappingPatches.shape[0]):
+            #     for j in range(smallOverlappingPatches.shape[1]):
+            #         tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+            #             # pbar.update(1)
+
+            # img_mask = np.zeros(shape=(imgWSIzeropadded.shape[0],imgWSIzeropadded.shape[1]), dtype=bool)
+
+            # create dataloader for concurrent tile processing and prediction computation
+            # dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize_FG, shuffle=False)
+            # with torch.no_grad():
+            #     for i, data in enumerate(dataloader, 0):
+            #         imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            #         prediction = model_FG(imgBatch)  # prediction should have shape (1,2,512,512)
+            #         prediction = (prediction[:,1,:,:] > prediction[:,0,:,:]).to("cpu").numpy()
+
+            #         for n, d in zip(data['name'], prediction):
+            #             x = int(n.split('-')[0])
+            #             y = int(n.split('-')[1])
+            #             img_mask[x * patchImgSize_FG: (x+1) * patchImgSize_FG, y * patchImgSize_FG: (y+1) * patchImgSize_FG] = d
+
+
+            
+
+            # img_mask = img_mask[:d2,:d1]
+
+            # print('img_mask_shape: ', img_mask.shape)
+            # ###########################################################
+
+            # # postprocessing
+            # img_mask = binary_fill_holes(img_mask)
+
+            # # remove connected regions if too small
+            # regionMinPixels = regionMinSizeUM / (targetSpacing_FG * targetSpacing_FG)
+            # img_mask, _ = label(img_mask)
+            # labeledRegions, numberFGRegions = label(remove_small_objects(img_mask, min_size=regionMinPixels))
+            # if numberFGRegions < 256:
+            #     labeledRegions = np.asarray(labeledRegions, np.uint8)
+
+            # if saveWSICoarseForegroundSegmResults:
+            #     logger.info('Saving WSI-level coarse FG segmentation results...')
+            #     savePredictionOverlayResults(imgWSI, labeledRegions, alpha=alpha, figSize=(labeledRegions.shape[1]/labeledRegions.shape[0]*figHeight, figHeight), fullResultPath=resultsDir + '/' + fname[:suffixCut] + '_0_fgSeg.png')
+
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(slide.shape[0],slide.shape[1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(levelDims[0][0],levelDims[0][1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+
+            # logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+            # process all detected tissue regions separately 
+            # for regionID in range(1, numberFGRegions+1):
+                # logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                # detectedRegion = labeledRegions == regionID
+
+                # # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                # temp = np.where(detectedRegion == 1)
+                # bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                # shiftMin = round(shiftMinUM / spacings[0])
+                # shiftMax = round(shiftMaxUM / spacings[0])
+
+                # # enlarge bounding box due to wider context consideration
+                # bbox[0] = max(bbox[0] - shiftMin, 0)
+                # bbox[1] = max(bbox[1] - shiftMin, 0)
+                # bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                # bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+                # print(bbox)
+
+                # logger.info('Extract high res patch and segm map...')
+                # img_WSI = 
+                # try:
+                #     img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox[1], bbox[0]]), level=0, size=np.array([bbox[3] - bbox[1], bbox[2] - bbox[0]])))[:, :, :3], np.uint8)
+                # except OpenSlideError:
+                #     logger.info('#################################### FILE CORRUPTED - IGNORED ####################################')
+                #     continue
+                # detectedRegion = detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+
+                # extract image and resample into target spacing of the structure segmentation network
+                # downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                # logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                # segMap = np.asarray(zoom(detectedRegion, downsamplingFactor, order=0), np.bool)
+                # img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                # # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                # assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                # logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                # if np.min(segMap.shape) < patchImgSize:
+                #     logger.info('Detected region smaller than window, thus skipped...')
+                #     continue
+
+                ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                # logger.info('Start segmentation process...')
+
+                # preprocess img
+            imgWSI = np.array((imgWSI / 255. - 0.5) / 0.5, np.float32)  # kept as a channels-last NumPy array so patchify() and .copy() below work
+            
+
+            # tessellate image and tissue prediction results
+            print('Patchify!')
+            smallOverlappingPatches = patchify(imgWSI.copy(), patch_size=(patchImgSize, patchImgSize, 3), step=segmentationPatchStride) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+            # smallOverlappingPatches_FG = patchify(segMap.copy(), patch_size=(patchImgSize, patchImgSize), step=segmentationPatchStride)
+
+            tileDataset = []
+            for i in range(smallOverlappingPatches.shape[0]):
+                for j in range(smallOverlappingPatches.shape[1]):
+                    # if smallOverlappingPatches_FG[i,j,:,:].any():
+                    tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+
+            print('tileDataset: ', len(tileDataset))
+            # calculate the extent of the stitched segmentation output; patchify cuts off the last patch if it does not fit the window exactly
+            startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+            endX = segmentationPatchStride * (smallOverlappingPatches.shape[0]-1) + patchSegmSize + startX
+            endY = segmentationPatchStride * (smallOverlappingPatches.shape[1]-1) + patchSegmSize + startY
+
+            bigPatchResults = torch.zeros(device=device, size=(ftChannelsOutput, endX - startX, endY - startY))
+
+            # create dataloader for concurrent prediction computation
+            print('Create Dataloader.')
+            dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize, shuffle=False)
+            print('Run Inference!')
+            with torch.no_grad():
+                for i, data in enumerate(dataloader, 0):
+                    imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+                    prediction = torch.softmax(model(imgBatch), dim=1)  # shape: (minibatchSize, 8, 516, 516)
+                    if applyTestTimeAugmentation:
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                        imgBatch = imgBatch.flip(3)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3)
+
+                    if centerWeighting:
+                        prediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                    # prediction = prediction.to("cpu")
+                    # print('Add prediction results to bigPatchResults.')
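+                    # note: overlapping patch areas are overwritten by the most recent patch here (plain assignment
+                    # below), unlike the accumulating '+=' stitching used in the other segment_WSI variants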
+                    for n, d in zip(data['name'], prediction):
+                        x = int(n.split('-')[0])
+                        y = int(n.split('-')[1])
+                        bigPatchResults[:, x * segmentationPatchStride: x * segmentationPatchStride + patchSegmSize, y * segmentationPatchStride: y * segmentationPatchStride + patchSegmSize] = d
+
+            bigPatchResults = torch.argmax(bigPatchResults, 0).byte()# shape: (1536, 2048)
+            print(bigPatchResults.shape)
+            logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+            # Context margin + border patches not fully inside the image are removed
+            imgWSI = imgWSI[startX:endX, startY:endY, :]
+                # segMap = segMap[startX:endX, startY:endY]
+                # bgMap = np.logical_not(segMap)
+
+                        # Save cropped foreground segmentation result as overlay
+                        # if saveCroppedWSIimg:
+                        #     logger.info('Saving cropped segmented WSI image...')
+                        #     saveImage(img_WSI, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+ i +'_'+str(regionID)+'_fgWSI_({}_{}_{}).png'.format(bbox[0], bbox[1], spacings[0]), figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+
+                        # # correct foreground segmentation including all touching vein prediction instances
+                        # bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                        # temp = bigPatchResults == 4
+                        # bgMap = np.logical_xor(clear_border(temp), temp)
+                        # segMap = np.logical_not(bgMap)
+                        # segMap = binary_fill_holes(segMap)
+                        # bgMap = np.logical_not(segMap)
+
+                        # remove small fg components
+                        # temp, numberLabeledRegions = label(segMap, struc3)
+                        # if numberLabeledRegions > 1:
+                        #     regionMinPixels = regionMinSizeUM / (targetSpacing * targetSpacing)
+                        #     regionIDs = np.where(np.array([region.area for region in regionprops(temp)]) > regionMinSizeUM)[0] + 1
+                        #     segMap = np.isin(temp, regionIDs)
+                        #     bgMap = np.logical_not(segMap)
+
+                        # bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+            # logger.info('Saving prediction and background overlay results...')
+            savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy img...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_resultWSI.npy', imgWSI)
+
+            logger.info('Start postprocessing...')
+
+            # remove border class
+            bigPatchResults[bigPatchResults == 7] = 0
+
+            # Delete BG to reduce postprocessing overhead
+            # bigPatchResults[bgMap] = 0
+
+            ################# HOLE FILLING ################
+            bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+            bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+            temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+            bigPatchResults[temp] = 3  # tuft
+            temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+            bigPatchResults[temp] = 6  # artery_lumen
+
+            ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+            temp, _ = label(bigPatchResults == 1)
+            finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+            ############ PERFORM TUBULE DILATION ############
+            finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+            finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+            if numberTubuli < 65500:
+                finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+            else:
+                finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
+
+            temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+            finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+            temp, _ = label(bigPatchResults == 3)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+            temp, _ = label(bigPatchResults == 4)
+            finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+            temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+            finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+            temp, _ = label(bigPatchResults == 6)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+            # finalResults_Instance = finalResults_Instance * segMap    
+
+            logger.info('Done - Save final instance overlay results...')
+            savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            logger.info('Done - Save final non-instance overlay results...')
+            finalResults = finalResults_Instance.copy()
+            finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+            savePredictionOverlayResults(imgWSI, finalResults, resultsDir + '/' + fname[:suffixCut]+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            # finalResults_Instance[bgMap] = labelBG
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy final instance prediction results...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+            del imgWSI
+            gc.collect()
+        logger.info('####################')
+
+    # break
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_nnunetv2.py b/segment_WSI_nnunetv2.py
new file mode 100644
index 0000000..88a708d
--- /dev/null
+++ b/segment_WSI_nnunetv2.py
@@ -0,0 +1,546 @@
+# this file recursively performs the automated segmentation of WSIs by applying the tissue segmentation and structure segmentation CNN to all WSIs contained in a specified folder
+
+import numpy as np
+import os
+import sys
+import cv2
+import torch
+import math
+import logging as log
+from tqdm import tqdm, trange
+import re
+
+# from openslide.lowlevel import OpenSlideError
+import openslide as osl
+from ome_types import from_xml
+from PIL import Image
+import matplotlib.pyplot as plt
+from scipy.ndimage import label
+from scipy.ndimage import zoom
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+import scipy as sp
+from skimage.transform import rescale, resize
+from skimage.measure import regionprops
+from skimage.segmentation import flood, flood_fill
+from skimage.color import rgb2gray
+from skimage.segmentation import clear_border
+from skimage import filters
+from skimage.morphology import remove_small_objects
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
+from model import Custom
+
+from pathlib import Path
+
+import nnunetv2
+import numpy as np
+import torch
+from batchgenerators.dataloading.data_loader import DataLoader
+from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
+from batchgenerators.transforms.utility_transforms import NumpyToTensor
+from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
+    save_json
+from nnunetv2.configuration import default_num_processes
+from nnunetv2.inference.export_prediction import export_prediction_from_softmax
+from nnunetv2.inference.sliding_window_prediction import predict_sliding_window_return_logits, compute_gaussian
+from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor
+from nnunetv2.utilities.file_path_utilities import get_output_folder, should_i_save_to_file, check_workers_busy
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.json_export import recursive_fix_for_json_export
+from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels, convert_labelmap_to_one_hot
+from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
+
+from tifffile import imread
+# from aicsimageio import AICSImage
+from tqdm import tqdm
+import gc
+
+
+def load_what_we_need(model_training_output_dir, use_folds, checkpoint_name):
+    # we could also load plans and dataset_json from the init arguments in the checkpoint. Not quite sure what is the
+    # best method so we leave things as they are for the moment.
+    dataset_json = load_json(join(model_training_output_dir, 'dataset.json'))
+    plans = load_json(join(model_training_output_dir, 'plans.json'))
+    plans_manager = PlansManager(plans)
+
+    if isinstance(use_folds, str):
+        use_folds = [use_folds]
+
+    parameters = []
+    for i, f in enumerate(use_folds):
+        f = int(f) if f != 'all' else f
+        checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name),
+                                map_location=torch.device('cpu'))
+        if i == 0:
+            trainer_name = checkpoint['trainer_name']
+            configuration_name = checkpoint['init_args']['configuration']
+            inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \
+                'inference_allowed_mirroring_axes' in checkpoint.keys() else None
+
+        parameters.append(checkpoint['network_weights'])
+
+    configuration_manager = plans_manager.get_configuration(configuration_name)
+    # restore network
+    num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json)
+    trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"),
+                                                trainer_name, 'nnunetv2.training.nnUNetTrainer')
+    network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager,
+                                                    num_input_channels, enable_deep_supervision=False)
+    return parameters, configuration_manager, inference_allowed_mirroring_axes, plans_manager, dataset_json, network, trainer_name
+
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/ylan/tissue_detection/Markus/p21_kidrightp789_B10_PAS'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/Cooperations/MarkusRinschen/'#'SPECIFIED WSI FOLDER'
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/'#'SPECIFIED WSI FOLDER'
+# WSIrootFolder = '/homeStor1/datasets/DeepGraft/Aachen_Biopsy_Slides_Extended'#'SPECIFIED WSI FOLDER'
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+resultsPath = '/homeStor1/ylan/data/MarkusRinschen/debug/' #'RESULTS PATH'
+# resultsPath = WSIrootFolder
+
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+
+# model_FG_path = 'TISSUE SEGMENTATION MODEL PATH'
+
+patchSegmSize = 516
+patchImgSize = 640
+patchLengthUM = 174.
+
+patchImgSize_FG = 512
+patchLengthUM_FG = 1500 #2500
+regionMinSizeUM = 3E5
+
+alpha = 0.3
+strideProportion = 0.5
+figHeight = 15
+minibatchSize = 2
+minibatchSize_FG = 1
+useAllGPUs = False
+GPUno = 0
+device = torch.device("cuda:" + str(GPUno) if torch.cuda.is_available() else "cpu")
+
+tubuliInstanceID_StartsWith = 10  # => tubuli instances start with id 10
+
+ftChannelsOutput = 8
+applyTestTimeAugmentation = True
+centerWeighting = False
+centerWeight = 3.
+
+saveWSICoarseForegroundSegmResults = True
+# saveCroppedForegroundSegmResults = False
+saveCroppedWSIimg = True
+saveWSIandPredNumpy = True
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+labelBG = 8
+
+# LOAD STRUCTURE SEGMENTATION MODEL
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model.train(False)
+model.eval()
+
+if useAllGPUs:
+    model = torch.nn.DataParallel(model)  # multi-GPUs
+model = model.to(device)
+
+# LOAD TISSUE SEGMENTATION MODEL
+use_folds = [0]
+model_training_output_dir = f'/homeStor1/ylan/data/nnUNet/nnUNet_results/Dataset555_TissueDetection/nnUNetTrainer__nnUNetPlans__2d'
+checkpoint_name = 'checkpoint_best.pth'
+
+parameters, configuration_manager, inference_allowed_mirroring_axes, \
+        plans_manager, dataset_json, network, trainer_name = load_what_we_need(model_training_output_dir, use_folds, checkpoint_name)
+# model = network
+model_FG = network
+network.load_state_dict(parameters[0])
+
+
+# model_FG = torch.load(model_FG_path)
+model_FG = model_FG.to(device)
+model_FG.eval()
+
+segmentationPatchStride = int(patchSegmSize * strideProportion)
+targetSpacing = patchLengthUM / patchSegmSize
+
+targetSpacing_FG = patchLengthUM_FG / patchImgSize_FG
+
+shiftMinUM = (patchImgSize - patchSegmSize) // 2 * targetSpacing + 2 # + 2 just for sufficient margin reasons
+shiftMaxUM = ((patchImgSize - patchSegmSize) // 2 + patchSegmSize * strideProportion) * targetSpacing
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsPath + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+struc3 = generate_ball(1)
+
+# walk through the directory and its subdirectories recursively
+for dirName, subdirList, fileList in os.walk(WSIrootFolder):
+
+    # print(fileList)
+    # filter WSIs in current directory
+    fileListWSI = sorted([fname for fname in fileList if (fname.endswith('.svs') or fname.endswith('.ndpi') or fname.endswith('.scn') or fname.endswith('.tif') or fname.endswith('.ome.tif')) and 'PAS' in fname])
+    # fileListWSI = ['Aachen_KiBiDatabase_KiBiAcADEZ140_01_001_PAS.svs']
+    if len(fileListWSI) != 0:
+        logger.info(str(len(fileListWSI)) + ' WSIs to be analyzed in directory: ' + dirName)
+
+        resultsDir = resultsPath + dirName[len(WSIrootFolder):]
+        resultsDirNPYfiles = resultsDir + '/npyFiles'
+        if not os.path.exists(resultsDirNPYfiles):
+            os.makedirs(resultsDirNPYfiles)
+
+        # traverse through all found WSIs
+        for no, fname in tqdm(enumerate(fileListWSI)):
+            print(fname)
+            # Extract/print relevant parameters
+                # try:
+                # except:
+                #     logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+
+            # img_WSI = imread(os.path.join(dirName,fname))
+            # spacings = np.array([0.2197, 0.2197])
+            # spacings = 
+            try:
+                slide = osl.OpenSlide(os.path.join(dirName, fname))
+                logger.info(str(no + 1) + ':  WSI:\t' + fname)
+                if 'openslide.mpp-x' in slide.properties.keys():
+                    spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+                else: 
+                    x_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_x
+                    y_spacing = from_xml(slide.properties['tiff.ImageDescription']).images[0].pixels.physical_size_y
+                    spacings = np.array([float(x_spacing), float(y_spacing)])
+            except:
+                logger.info('Slide {} or its spacings not readable, slide skipped!'.format(fname))
+                continue
+            levelDims = np.array(slide.level_dimensions)
+            amountLevels = len(levelDims)
+            levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), int)
+
+            logger.info('Spacings: ' + str(spacings))
+            logger.info('Level Dimensions: ' + str(levelDims))
+            logger.info('Level Downsamples: '+str(levelDownsamples))
+
+            suffixCut = -5 if fname.split('.')[-1] == 'ndpi' else -4
+            if fname.split('.')[-1] == 'tif':
+                suffixCut = -8
+
+
+
+            # read the full-resolution WSI (level 0) and resample it to the target spacing of the structure segmentation network (level selection for efficiency is currently disabled, see commented code below)
+            spacingFactorX = spacings[0] / targetSpacing
+            spacingFactorY = spacings[1] / targetSpacing
+            # x_scaled = round(levelDims[0][0] * spacingFactorX)
+            # y_scaled = round(levelDims[0][1] * spacingFactorY)
+
+            # usedLevel = np.argwhere(np.all(levelDims > [x_scaled, y_scaled], 1) == True).max()
+
+            # # resample to target spacing
+            # logger.info('Image size resampled to FG spacing would be {}, {}, thus level {} with resolution {} chosen as resampling point!'.format(x_scaled, y_scaled, usedLevel, levelDims[usedLevel]))
+            # spacingOnUsedLevelX = spacings[0] * levelDownsamples[usedLevel]
+            # spacingOnUsedLevelY = spacings[1] * levelDownsamples[usedLevel]
+            # downsamplingFactorX = spacingOnUsedLevelX / targetSpacing_FG
+            # downsamplingFactorY = spacingOnUsedLevelY / targetSpacing_FG
+            np_slide = np.array(slide.read_region(location=np.array([0, 0]), level=0, size=levelDims[0]))[:, :, :3]
+
+            # d1 = int(round(imgWSI.shape[1] * downsamplingFactorX))
+            # d2 = int(round(imgWSI.shape[0] * downsamplingFactorY))
+
+            # img_shape = slide.pages[0].shape
+            # print(spacingFactorX, spacingFactorY)
+            # print(targetSpacing_FG)
+            # print(slide.shape)
+            # print(slide.shape[0])
+            d1 = int(round(np_slide.shape[1] * spacingFactorX)) 
+            d2 = int(round(np_slide.shape[0] * spacingFactorY))
+            # # print(d1, d2)
+
+            imgWSI = cv2.resize(np_slide, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR) #dtype: uint8, size: d2 x d1 x 3
+            del np_slide
+            gc.collect()
+
+            print(imgWSI.shape)
+            # imgWSIzeropadded = np.zeros(shape=(d2+patchImgSize_FG-1, d1+patchImgSize_FG-1, 3), dtype=np.float32)
+            # imgWSIzeropadded[:d2,:d1,:] = imgWSI
+
+            # tesselate resampled image
+            # smallOverlappingPatches = patchify(imgWSIzeropadded, patch_size=(patchImgSize_FG, patchImgSize_FG, 3), step=patchImgSize_FG) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+
+            # tileDataset = []
+            # # with tqdm(total=smallOverlappingPatches.shape[0] * smallOverlappingPatches.shape[1]) as pbar:
+            # for i in range(smallOverlappingPatches.shape[0]):
+            #     for j in range(smallOverlappingPatches.shape[1]):
+            #         tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+            #             # pbar.update(1)
+
+            # img_mask = np.zeros(shape=(imgWSIzeropadded.shape[0],imgWSIzeropadded.shape[1]), dtype=bool)
+
+            # create dataloader for concurrent tile processing and prediction computation
+            # dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize_FG, shuffle=False)
+            # with torch.no_grad():
+            #     for i, data in enumerate(dataloader, 0):
+            #         imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            #         prediction = model_FG(imgBatch)  # prediction should have shape (1,2,512,512)
+            #         prediction = (prediction[:,1,:,:] > prediction[:,0,:,:]).to("cpu").numpy()
+
+            #         for n, d in zip(data['name'], prediction):
+            #             x = int(n.split('-')[0])
+            #             y = int(n.split('-')[1])
+            #             img_mask[x * patchImgSize_FG: (x+1) * patchImgSize_FG, y * patchImgSize_FG: (y+1) * patchImgSize_FG] = d
+
+
+            
+
+            # img_mask = img_mask[:d2,:d1]
+
+            # print('img_mask_shape: ', img_mask.shape)
+            # ###########################################################
+
+            # # postprocessing
+            # img_mask = binary_fill_holes(img_mask)
+
+            # # remove connected regions if too small
+            # regionMinPixels = regionMinSizeUM / (targetSpacing_FG * targetSpacing_FG)
+            # img_mask, _ = label(img_mask)
+            # labeledRegions, numberFGRegions = label(remove_small_objects(img_mask, min_size=regionMinPixels))
+            # if numberFGRegions < 256:
+            #     labeledRegions = np.asarray(labeledRegions, np.uint8)
+
+            # if saveWSICoarseForegroundSegmResults:
+            #     logger.info('Saving WSI-level coarse FG segmentation results...')
+            #     savePredictionOverlayResults(imgWSI, labeledRegions, alpha=alpha, figSize=(labeledRegions.shape[1]/labeledRegions.shape[0]*figHeight, figHeight), fullResultPath=resultsDir + '/' + fname[:suffixCut] + '_0_fgSeg.png')
+
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(slide.shape[0],slide.shape[1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+            # labeledRegions = cv2.resize(labeledRegions, dsize=(levelDims[0][0],levelDims[0][1]), interpolation = cv2.INTER_NEAREST) # FG RESULTS ON WSI-RESOLUTION, UINT8, REGION IDs
+
+            # logger.info('In total -> '+str(numberFGRegions)+' <- regions on WSI detected!')
+
+            # process all detected tissue regions separately 
+            # for regionID in range(1, numberFGRegions+1):
+                # logger.info('#######\n Extract foreground region ' + str(regionID) + '...')
+                # detectedRegion = labeledRegions == regionID
+
+                # # compute bounding box and how much to enlarge bbox to consider wider context utilization (especially for patchify)
+                # temp = np.where(detectedRegion == 1)
+                # bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+                # shiftMin = round(shiftMinUM / spacings[0])
+                # shiftMax = round(shiftMaxUM / spacings[0])
+
+                # # enlarge bounding box due to wider context consideration
+                # bbox[0] = max(bbox[0] - shiftMin, 0)
+                # bbox[1] = max(bbox[1] - shiftMin, 0)
+                # bbox[2] = min(bbox[2] + shiftMax, detectedRegion.shape[0] - 1) + 1
+                # bbox[3] = min(bbox[3] + shiftMax, detectedRegion.shape[1] - 1) + 1
+                # print(bbox)
+
+                # logger.info('Extract high res patch and segm map...')
+                # img_WSI = 
+                # try:
+                #     img_WSI = np.asarray(np.array(slide.read_region(location=np.array([bbox[1], bbox[0]]), level=0, size=np.array([bbox[3] - bbox[1], bbox[2] - bbox[0]])))[:, :, :3], np.uint8)
+                # except OpenSlideError:
+                #     logger.info('#################################### FILE CORRUPTED - IGNORED ####################################')
+                #     continue
+                # detectedRegion = detectedRegion[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+
+                # extract image and resample into target spacing of the structure segmentation network
+                # downsamplingFactor = spacings[0] / targetSpacing  # Rescaling would be very slow using 'rescale' method!
+                # logger.info('Utilized spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+                # segMap = np.asarray(zoom(detectedRegion, downsamplingFactor, order=0), np.bool)
+                # img_WSI = cv2.resize(img_WSI, dsize=tuple(np.flip(segMap.shape)), interpolation=cv2.INTER_LINEAR)
+                # # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+                # assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via zoom/resize led to unequal resolutions..."
+                # logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+                # if np.min(segMap.shape) < patchImgSize:
+                #     logger.info('Detected region smaller than window, thus skipped...')
+                #     continue
+
+                ##### PREPROCESSING DONE - NOW: NETWORK SEGMENTATION PART #####
+                # logger.info('Start segmentation process...')
+
+                # preprocess img
+            imgWSI = np.array((imgWSI / 255. - 0.5) / 0.5, np.float32)
+            
+
+            # tessellate the resampled image into patches (the FG-mask patching is kept commented out below)
+            print('Patchify!')
+            smallOverlappingPatches = patchify(imgWSI, patch_size=(patchImgSize, patchImgSize, 3), step=segmentationPatchStride) # CARE: IMAGE DATA AT THE RIGHT AND BOTTOM BORDERS IS LOST !!!
+            # smallOverlappingPatches_FG = patchify(segMap.copy(), patch_size=(patchImgSize, patchImgSize), step=segmentationPatchStride)
+
+            tileDataset = []
+            for i in range(smallOverlappingPatches.shape[0]):
+                for j in range(smallOverlappingPatches.shape[1]):
+                    # if smallOverlappingPatches_FG[i,j,:,:].any():
+                    tileDataset.append({'name': '{}-{}'.format(i, j), 'data': torch.from_numpy(smallOverlappingPatches[i, j, 0, :, :, :])})
+
+            print('tileDataset: ', len(tileDataset))
+            # calculate the extent of the stitched segmentation output, since patchify cuts off the last patch if it does not fit exactly into the window
+            startX = (patchImgSize - patchSegmSize) // 2; startY = startX
+            endX = segmentationPatchStride * (smallOverlappingPatches.shape[0]-1) + patchSegmSize + startX
+            endY = segmentationPatchStride * (smallOverlappingPatches.shape[1]-1) + patchSegmSize + startY
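+            # illustrative geometry (patch sizes are defined elsewhere in this script): assuming
+            # patchImgSize=640 and patchSegmSize=516, startX = (640-516)//2 = 62 and the stitched
+            # output spans stride*(nPatches-1) + 516 pixels per axis, offset by 62 from the border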
+
+            bigPatchResults = torch.zeros(device='cpu', size=(ftChannelsOutput, endX - startX, endY - startY))
+
+            # create dataloader for concurrent prediction computation
+            print('Create Dataloader.')
+            logger.info('Create Dataloader.')
+
+            dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=minibatchSize, shuffle=False)
+            print('Run Inference!')
+            logger.info('Run Inference!')
+
+            with torch.no_grad():
+                for i, data in enumerate(dataloader, 0):
+                    imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+
+                    prediction = torch.softmax(model(imgBatch), dim=1)  # shape: (minibatchSize, 8, 516, 516)
+                    if applyTestTimeAugmentation:
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(2)
+
+                        imgBatch = imgBatch.flip(3)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3).flip(2)
+
+                        imgBatch = imgBatch.flip(2)
+                        prediction += torch.softmax(model(imgBatch), 1).flip(3)
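+                        # 'prediction' now holds the summed softmax scores of four views (identity,
+                        # vertical flip, horizontal flip, both); each flipped pass is flipped back
+                        # before accumulation, and no normalisation is needed since only the argmax is used later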
+
+                    if centerWeighting:
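+                        # multiplies all class scores in the central half of the patch by centerWeight,
+                        # intended to favour centre predictions (where the network sees full context)
+                        # over border predictions when patches overlap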
+                        prediction[:, :, patchSegmSize // 4: patchSegmSize // 4 * 3, patchSegmSize // 4: patchSegmSize // 4 * 3] *= centerWeight
+
+                    prediction = prediction.to("cpu")
+                    # print('Add prediction results to bigPatchResults.')
+                    for n, d in zip(data['name'], prediction):
+                        x = int(n.split('-')[0])
+                        y = int(n.split('-')[1])
+                        bigPatchResults[:, x * segmentationPatchStride: x * segmentationPatchStride + patchSegmSize, y * segmentationPatchStride: y * segmentationPatchStride + patchSegmSize] = d
+
+
+
+            bigPatchResults = torch.argmax(bigPatchResults, 0).byte()  # shape: (1536, 2048)
+            print(bigPatchResults.shape)
+            logger.info('Predictions generated. Final shape: '+str(bigPatchResults.shape))
+
+            # crop away the context margin and the border area not covered by full patches
+            imgWSI = imgWSI[startX:endX, startY:endY, :]
+                # segMap = segMap[startX:endX, startY:endY]
+                # bgMap = np.logical_not(segMap)
+
+                        # Save cropped foreground segmentation result as overlay
+                        # if saveCroppedWSIimg:
+                        #     logger.info('Saving cropped segmented WSI image...')
+                        #     saveImage(img_WSI, fullResultPath=resultsDir + '/' + fname[:suffixCut] +'_'+ i +'_'+str(regionID)+'_fgWSI_({}_{}_{}).png'.format(bbox[0], bbox[1], spacings[0]), figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight))
+
+                        # # correct foreground segmentation including all touching vein prediction instances
+                        # bigPatchResults[bgMap] = 4 #vein class assignment of bg
+                        # temp = bigPatchResults == 4
+                        # bgMap = np.logical_xor(clear_border(temp), temp)
+                        # segMap = np.logical_not(bgMap)
+                        # segMap = binary_fill_holes(segMap)
+                        # bgMap = np.logical_not(segMap)
+
+                        # remove small fg components
+                        # temp, numberLabeledRegions = label(segMap, struc3)
+                        # if numberLabeledRegions > 1:
+                        #     regionMinPixels = regionMinSizeUM / (targetSpacing * targetSpacing)
+                        #     regionIDs = np.where(np.array([region.area for region in regionprops(temp)]) > regionMinSizeUM)[0] + 1
+                        #     segMap = np.isin(temp, regionIDs)
+                        #     bgMap = np.logical_not(segMap)
+
+                        # bigPatchResults[bgMap] = labelBG # color of label 'labelBG' => Purple represents BG just for visualization purposes
+
+            # logger.info('Saving prediction and background overlay results...')
+            savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '/' + fname[:suffixCut] +'_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy img...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_resultWSI.npy', imgWSI)
+
+            logger.info('Start postprocessing...')
+
+            # remove border class
+            bigPatchResults[bigPatchResults == 7] = 0
+
+            # Delete BG to reduce postprocessing overhead
+            # bigPatchResults[bgMap] = 0
+
+            ################# HOLE FILLING ################
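+            # nested classes use a save-and-restore pattern: the tuft (3) mask is stored, the enclosing
+            # glomerulus (2) is hole-filled over both labels, then the tuft is written back; the same
+            # applies to the artery (5) / artery lumen (6) pair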
+            bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+            bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+            temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+            bigPatchResults[temp] = 3  # tuft
+            temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+            bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+            bigPatchResults[temp] = 6  # artery_lumen
+
+            ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+            temp, _ = label(bigPatchResults == 1)
+            finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+            ############ PERFORM TUBULE DILATION ############
+            finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+            finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
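+            # OpenCV's dilate does not operate on int32 label images, so instance labels are dilated as
+            # uint16 while all IDs fit below 65535, and as float64 (cast back to int32) otherwise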
+            if numberTubuli < 65500:
+                finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+            else:
+                finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
+
+            temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+            finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+            temp, _ = label(bigPatchResults == 3)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+            temp, _ = label(bigPatchResults == 4)
+            finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+            temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+            finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+            temp, _ = label(bigPatchResults == 6)
+            finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+            # finalResults_Instance = finalResults_Instance * segMap    
+
+            logger.info('Done - Save final instance overlay results...')
+            savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir + '/' + fname[:suffixCut] +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            if saveWSIandPredNumpy:
+                logger.info('Saving numpy final instance prediction results...')
+                np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+            logger.info('Done - Save final non-instance overlay results...')
+            finalResults = finalResults_Instance.copy()
+
+            finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+            savePredictionOverlayResults(imgWSI, finalResults, resultsDir + '/' + fname[:suffixCut]+'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+            # finalResults_Instance[bgMap] = labelBG
+
+            
+
+            del imgWSI
+            gc.collect()
+        logger.info('####################')
+
+    break
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+log.info('%%%% Ended regularly ! %%%%')
diff --git a/segment_WSI_noTD.py b/segment_WSI_noTD.py
new file mode 100644
index 0000000..08718b2
--- /dev/null
+++ b/segment_WSI_noTD.py
@@ -0,0 +1,569 @@
+import numpy as np
+import os
+import sys
+import logging as log
+from PIL import Image
+from scipy.ndimage import label
+from skimage.color import rgb2gray
+import openslide as osl
+from openslide.lowlevel import OpenSlideError
+from skimage.transform import rescale, resize
+from scipy.ndimage import zoom
+import matplotlib.pyplot as plt
+from skimage.segmentation import flood, flood_fill
+from skimage.color import rgb2gray
+from skimage import filters
+from scipy.ndimage import binary_dilation, binary_closing, binary_fill_holes, binary_erosion, binary_opening
+import scipy.ndimage
+from skimage.measure import regionprops
+from skimage.morphology import remove_small_objects
+import cv2
+
+
+import shutil
+from pathlib import Path
+import torch
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionOverlayResults_Fast, saveImage
+from model import Custom
+from tqdm import tqdm
+
+from torchvision.transforms import Resize
+
+
+from utils import generate_ball, patchify, unpatchify, saveOverlayResults, savePredictionOverlayResults, savePredictionResultsWithoutDilation, saveImage, savePredictionResults, saveRGBPredictionOverlayResults, convert_labelmap_to_rgb_with_instance_first_class
+
+from joblib import Parallel, delayed
+import multiprocessing
+
+
+def writeOutPatchesRowwise(x):
+    for y in range(extractedPatches.shape[1]):
+        if extractedSegm[x, y].sum() > (minFGproportion * imageResolution * imageResolution):
+            Image.fromarray(extractedPatches[x, y, 0]).save(
+                resultsPath + '/' + WSIpath[:suffixCut] + '_' + str(x) + '_' + str(y) + '_' + str(i + 1) + '.png')
+    return
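+# NOTE: this helper reads module-level globals (extractedPatches, extractedSegm, resultsPath, WSIpath,
+# suffixCut, i) that are only defined inside the commented-out WSI loop further below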
+
+
+TUBULI_MIN_SIZE = 400
+GLOM_MIN_SIZE = 1500
+TUFT_MIN_SIZE = 500
+VEIN_MIN_SIZE = 3000
+ARTERY_MIN_SIZE = 400
+LUMEN_MIN_SIZE = 20
+
+tubuliInstanceID_StartsWith = 10
+
+stain = 'NGAL' #PAS, aSMA, Col III, NGAL, Fibronectin, Meca-32, CD44, F4-80, CD31, AFOG
+# WSIfolder = '/work/scratch/bouteldja/Data_ActivePAS'
+# WSIfolder = '/images/ACTIVE/2015-04_Boor/Nassim_2019/kidney histology'
+WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/640_174uM_annotated/pig/BLOCKS'
+# WSIfolder = '/homeStor1/ylan/data/MarkusRinschen/640_174uM_annotated_0.5/pig/TEST'
+
+modelpath = '/homeStor1/nbouteldja/Results_ActivePAS/custom_train_val_test_e500_b6_r0.001_w1e-05_516_640_32_RAdam_instance_1deeper_Healthy_UUO_Adenine_Alport_IRI_NTN_fewSpecies_fewHuman_Background_+-1range_X/Model/finalModel.pt'
+
+# Patchfolder = '/'
+imageSizeUM = 174
+imageResolution = 640
+strideProportion = 1.0
+minFGproportion = 0.5
+
+
+
+
+resultsPath = '/work/scratch/bouteldja/Data_StainTranslation/'+stain.replace(" ", "")
+resultsPath = os.path.join(WSIfolder, 'flash') 
+# resultsPath = str(Path(WSIfolder).parent / 'flash')
+resultsForegroundSegmentation = resultsPath + '/FGsegm'
+resultsDirNPYfiles = resultsPath + '/npy'
+
+if os.path.exists(resultsForegroundSegmentation):
+    shutil.rmtree(resultsForegroundSegmentation)
+# if os.path.exists(resultsDirNPYfiles):
+#     shutil.rmtree(resultsDirNPYfiles)
+
+onlyPerformForegroundSegmentation = True
+
+saveWSICoarseForegroundSegmResults = True
+saveWSICoarseForegroundSegmResults_RegionSeparate = False
+saveWSICroppedForegroundSegmResults = False
+alpha = 0.3
+figHeight = 40
+
+targetSpacing = imageSizeUM / imageResolution
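+# 174 um over 640 px -> targetSpacing of roughly 0.272 um per pixel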
+shiftUM = imageSizeUM // 3
+
+struc3 = generate_ball(1)
+struc5 = generate_ball(2)
+struc7 = generate_ball(3)
+
+if not os.path.exists(resultsPath):
+    os.makedirs(resultsPath)
+if not os.path.exists(resultsForegroundSegmentation):
+    os.makedirs(resultsForegroundSegmentation)
+if not os.path.exists(resultsDirNPYfiles):
+    os.makedirs(resultsDirNPYfiles)
+
+# Set up logger
+log.basicConfig(
+    level=log.INFO,
+    format='%(asctime)s %(message)s',
+    datefmt='%Y/%m/%d %I:%M:%S %p',
+    handlers=[
+        log.FileHandler(resultsForegroundSegmentation + '/LOGS.log', 'w'),
+        log.StreamHandler(sys.stdout)
+    ])
+logger = log.getLogger()
+
+
+files = sorted(list(filter(lambda x: ('.ndpi' in x or '.svs' in x) and stain in x, os.listdir(WSIfolder))))
+# files = sorted(list(filter(lambda x: '.ndpi' in x or '.svs' in x, os.listdir(WSIfolder))))
+logger.info('Amount of WSIs in folder: ' + str(len(files)))
+
+num_cores = multiprocessing.cpu_count()
+
+detectedRegions = []
+
+minibatchSize = 64
+ftChannelsOutput = 8
+segSize = 516
+device = torch.device("cuda:0"  if torch.cuda.is_available() else "cpu")
+model = Custom(input_ch=3, output_ch=ftChannelsOutput, modelDim=2)
+model.load_state_dict(torch.load(modelpath, map_location=lambda storage, loc: storage))
+model = model.to(device)
+model.train(False)
+model.eval()
+
+directories = list(Path(WSIfolder).iterdir())
+# print(directories)
+
+for slide in directories:
+    # print(slide.stem)
+    
+    slide_name = slide.stem + f'_scene_{str(slide)[-1]}'
+    if slide_name != 'p21_kidrightp131415_B10_PAS_scene_2':
+        continue
+    print(slide_name)
+    resultsDir = os.path.join(resultsPath, slide_name)
+    tempNPYfiles = os.path.join(resultsDirNPYfiles, slide_name)
+
+
+    tileDataset = []
+    x_max = 0
+    y_max = 0
+    x_min = len(list(slide.iterdir()))
+    y_min = x_min
+
+
+    ########################################################
+
+    for patch_path in tqdm(list(slide.iterdir())):
+        name = patch_path.stem
+        # print(name)
+        if name != 'p21_kidrightp131415_B10_PAS.czi - Scene #2_(6-53)':
+            continue
+        coords = np.array(name.split('_')[-1][1:-1].split('-'), int)
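+        # e.g. a patch named '..._(6-53)' yields coords = [6, 53]; the first value is compared against
+        # the y range and the second against the x range below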
+
+        # find max:
+        # x_max = coords[0] if coords[0] > x_max
+
+        if coords[0] > y_max:
+            y_max = coords[0]
+        if coords[1] > x_max:
+            x_max = coords[1]
+        if coords[0] < y_min:
+            y_min = coords[0]
+        if coords[1] < x_min:
+            x_min = coords[1]
+
+
+
+        patch = np.asarray(Image.open(patch_path)).astype(np.float32)
+        # print(patch.shape)
+        patch = torch.from_numpy(patch)
+        tileDataset.append({'data': patch, 'coords': coords})
+    ########################################################
+
+    # print(x_min, x_max)
+    # print(y_min, y_max)
+    # print((x_max - x_min)*imageResolution)
+    # print((y_max - y_min)*imageResolution)
+    batch_size = 50
+    dataloader = torch.utils.data.DataLoader(tileDataset, batch_size=batch_size, shuffle=False)
+    
+
+    bigPatchResults = torch.zeros(device='cpu', size=(ftChannelsOutput, (x_max+1 - x_min)*segSize, (y_max+1 - y_min)*segSize))
+    imgWSI = torch.zeros(device='cpu', size=(3, (x_max+1 - x_min)*segSize, (y_max+1 - y_min)*segSize))
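+    # the mosaic covers (x_max+1-x_min) x (y_max+1-y_min) tiles of segSize pixels; with
+    # strideProportion = 1.0 the tiles are written edge to edge without overlap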
+
+    print(bigPatchResults.shape)
+    print(imgWSI.shape)
+
+
+    with torch.no_grad():
+        for i, data in tqdm(enumerate(dataloader, 0)):
+            imgBatch = data['data'].permute(0, 3, 1, 2).to(device)
+
+            prediction = torch.softmax(model(imgBatch), dim=1)
+            # print(prediction.shape)
+            target_size = (prediction.shape[2], prediction.shape[3])
+            imgBatch = Resize(size=target_size)(imgBatch)
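+            # the input patches are resized to the prediction's spatial size so that image and
+            # prediction tiles align in the mosaic (presumably 640 -> 516 with the settings above)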
+            
+            # for i in range(imgBatch.shape[0]):
+            #     img = imgBatch[i]
+            #     pred = prediction[i]
+
+                # d1 = int(img.shape[2] * (segSize / imageResolution))
+                # d2 = int(img.shape[1] * (segSize / imageResolution))
+
+                # img = Resize(size=(d1,d2))(img)
+                # print(img)
+
+                # print(img.shape)
+                # img = img.transpose(1,2,0)
+
+                # img = cv2.resize(img, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR).astype(np.uint8)
+                # print(img.shape)
+                # print(pred.shape)
+                # savePredictionOverlayResults(img.cpu().numpy().astype(np.uint8).transpose(1,2,0), torch.argmax(pred, 0).cpu().byte().numpy(), resultsDir + f'_resultOverlay_{i}.png', figSize=(pred.shape[1]/pred.shape[0]*figHeight, figHeight), alpha=alpha)
+                
+            for i in range(imgBatch.shape[0]):
+                n = data['coords'][i]
+                d = prediction[i]
+                img = imgBatch[i]
+                x = (n[1]-x_min)
+                y = (n[0]-y_min)
+                bigPatchResults[:, int(x*segSize*strideProportion): int((x*strideProportion +1)*segSize), int(y*segSize*strideProportion) : int((y*strideProportion+1)*segSize)] = d.cpu()
+                imgWSI[:, int(x*segSize*strideProportion): int((x*strideProportion +1)*segSize), int(y*segSize*strideProportion) : int((y*strideProportion+1)*segSize)] = img.cpu()
+                # imgWSI[:, int(x*imageResolution*strideProportion):int(x*strideProportion*imageResolution + imageResolution), int(y*imageResolution*strideProportion):int(y*strideProportion*imageResolution+imageResolution)] = img.cpu()
+            # break
+    # break
+    bigPatchResults = torch.argmax(bigPatchResults, 0).byte().cpu().numpy()
+    imgWSI = imgWSI.cpu().numpy().astype(np.uint8)
+
+    # print(imgWSI.shape)
+
+    # d1 = int(imgWSI.shape[2] * (segSize / imageResolution))
+    # d2 = int(imgWSI.shape[1] * (segSize / imageResolution))
+
+    # print(d1, d2)
+    imgWSI = imgWSI.transpose(1,2,0)
+
+    # imgWSI = cv2.resize(imgWSI, dsize=(d1, d2), interpolation=cv2.INTER_LINEAR).astype(np.uint8)
+    print(imgWSI.shape)
+    print(bigPatchResults.shape)
+    
+    # img_WSI_prep = np.array((imgWSI / 255. - 0.5) / 0.5, np.float32)
+    # pil_image = Image.fromarray(imgWSI.astype(np.uint8)).convert('RGB')
+    # pil_image.save(resultsDir + '_imgwSI.png')
+
+    # imgWSI = cv2.resize(img)
+    # cv2.imwrite(resultsDir + '_imgwSI.png', imgWSI)
+    
+    # print(bigPatchResults.shape)
+
+    # savePredictionOverlayResults_Fast(imgWSI, bigPatchResults, resultsDir+'_fastOverlay.png', figHeight=figHeight)
+
+
+    # imgWSI = imgWSI.transpose(2, 0, 1)
+
+    # pil_mask = Image.fromarray(bigPatchResults).convert('L')
+    # pil_mask = pil_mask.point(lambda p: 255 if p<0 else 0)
+    # overlaid_image = Image.new('RGBA', image.size)
+    # for i in range(bigPatchResults.shape[0]):
+    #     color = colorMatp[i]
+
+
+
+
+
+
+    savePredictionOverlayResults(imgWSI, bigPatchResults, resultsDir + '_resultOverlay.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+    # if saveWSIandPredNumpy:
+    logger.info('Saving numpy img...')
+    np.save(tempNPYfiles +'_resultWSI.npy', imgWSI)
+
+    logger.info('Start postprocessing...')
+
+    # remove border class
+    bigPatchResults[bigPatchResults == 7] = 0
+
+    # Delete BG to reduce postprocessing overhead
+    # bigPatchResults[bgMap] = 0
+
+    ################# HOLE FILLING ################
+    bigPatchResults[binary_fill_holes(bigPatchResults == 1)] = 1  # tubuli
+    bigPatchResults[binary_fill_holes(bigPatchResults == 4)] = 4  # veins
+    temp = binary_fill_holes(bigPatchResults == 3)  # tuft
+    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 3, bigPatchResults == 2))] = 2  # glom
+    bigPatchResults[temp] = 3  # tuft
+    temp = binary_fill_holes(bigPatchResults == 6)  # artery_lumen
+    bigPatchResults[binary_fill_holes(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))] = 5  # full_artery
+    bigPatchResults[temp] = 6  # artery_lumen
+
+    ###### REMOVING TOO SMALL CONNECTED REGIONS ######
+    temp, _ = label(bigPatchResults == 1)
+    finalResults_Instance = remove_small_objects(temp, min_size=TUBULI_MIN_SIZE) > 0
+
+    ############ PERFORM TUBULE DILATION ############
+    finalResults_Instance, numberTubuli = label(finalResults_Instance) #dtype: int32
+    finalResults_Instance[finalResults_Instance > 0] += (tubuliInstanceID_StartsWith - 1)
+    if numberTubuli < 65500:
+        finalResults_Instance = cv2.dilate(np.asarray(finalResults_Instance, np.uint16), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1) #RESULT TYPE: UINT16
+    else:
+        finalResults_Instance = np.asarray(cv2.dilate(np.asarray(finalResults_Instance, np.float64), kernel=np.asarray(generate_ball(2), np.uint8), iterations=1), np.int32)
+
+    temp, _ = label(np.logical_or(bigPatchResults == 2, bigPatchResults == 3))
+    finalResults_Instance[remove_small_objects(temp, min_size=GLOM_MIN_SIZE) > 0] = 2
+    temp, _ = label(bigPatchResults == 3)
+    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=TUFT_MIN_SIZE) > 0, finalResults_Instance==2)] = 3
+    temp, _ = label(bigPatchResults == 4)
+    finalResults_Instance[remove_small_objects(temp, min_size=VEIN_MIN_SIZE) > 0] = 4
+    temp, _ = label(np.logical_or(bigPatchResults == 5, bigPatchResults == 6))
+    finalResults_Instance[remove_small_objects(temp, min_size=ARTERY_MIN_SIZE) > 0] = 5
+    temp, _ = label(bigPatchResults == 6)
+    finalResults_Instance[np.logical_and(remove_small_objects(temp, min_size=LUMEN_MIN_SIZE) > 0, finalResults_Instance==5)] = 6
+
+    print(finalResults_Instance)
+
+    # finalResults_Instance = finalResults_Instance * segMap    
+
+    logger.info('Done - Save final instance overlay results...')
+    savePredictionOverlayResults(imgWSI, finalResults_Instance, resultsDir +'_resultOverlayFINALInstance.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+    # if saveWSIandPredNumpy:
+    #     logger.info('Saving numpy final instance prediction results...')
+    #     np.save(resultsDirNPYfiles + '/' + fname[:suffixCut] +'_finalInstancePrediction.npy', finalResults_Instance)
+
+    logger.info('Done - Save final non-instance overlay results...')
+    finalResults = finalResults_Instance.copy()
+
+    finalResults[finalResults > tubuliInstanceID_StartsWith] = 1
+    savePredictionOverlayResults(imgWSI, finalResults, resultsDir +'_resultOverlayFINAL.png', figSize=(bigPatchResults.shape[1]/bigPatchResults.shape[0]*figHeight, figHeight), alpha=alpha)
+
+
+
+    del finalResults
+    del finalResults_Instance
+    del imgWSI
+    del bigPatchResults
+
+
+
+# try:
+
+#     for no, WSIpath in enumerate(files):
+#         # Load slide
+#         slide = osl.OpenSlide(os.path.join(WSIfolder, WSIpath))
+
+#         # Extract/print relevant parameters
+#         spacings = np.array([float(slide.properties['openslide.mpp-x']), float(slide.properties['openslide.mpp-y'])])
+#         levelDims = np.array(slide.level_dimensions)
+#         amountLevels = len(levelDims)
+#         levelDownsamples = np.asarray(np.round(np.array(slide.level_downsamples)), np.int)
+
+#         logger.info(str(no + 1) + ':  WSI:\t' + WSIpath + ', Spacing:\t' + str(spacings) + ', levels:\t' + str(amountLevels))
+
+#         if WSIpath.split('.')[-1] == 'ndpi':
+#             suffixCut = -5
+#         else:
+#             suffixCut = -4
+
+#         # specify used wsi level for foreground segmentation: min. 500x500 pixels
+#         usedLevel = np.argwhere(np.all(levelDims > [500, 500], 1) == True).max()
+#         logger.info('Used level for foreground segmentation: ' + str(usedLevel))
+
+#         # extract image from that level
+#         imgRGB = np.array(slide.read_region(location=np.array([0, 0]), level=usedLevel, size=levelDims[usedLevel]))[:, :,:3]
+#         img = rgb2gray(imgRGB)
+
+#         # foreground segmentation
+#         otsu_threshold = filters.threshold_otsu(img)
+
+#         divideSizeFactor = 30
+
+#         if stain == 'PAS':
+#             if '17.40.53' in WSIpath:
+#                 otsu_threshold -= 0.065
+
+#         elif stain == 'aSMA':
+#             if '22.15.27' in WSIpath or '22.28.25' in WSIpath:
+#                 otsu_threshold -= 0.07
+#             if '20.19.57' in WSIpath or '16.59.43' in WSIpath:
+#                 otsu_threshold += 0.024
+
+#         elif stain == 'CD31':
+#             if '18.51.39' in WSIpath:
+#                 otsu_threshold += 0.02
+
+#         elif stain == 'Col III':
+#             if '21.25.37' in WSIpath or '21.42.09' in WSIpath:
+#                 otsu_threshold -= 0.1
+#             elif '2172_13' in WSIpath:
+#                 otsu_threshold += 0.05
+#             else:
+#                 otsu_threshold += 0.055
+
+#         elif stain == 'NGAL':
+#             if '21.40.59' in WSIpath or '21.35.56' in WSIpath:
+#                 otsu_threshold += 0.005
+#             elif '18.04.45' in WSIpath:
+#                 otsu_threshold += 0.07
+#             elif '21.46.20' in WSIpath:
+#                 otsu_threshold += 0.01
+#             else:
+#                 otsu_threshold += 0.05
+
+#         elif stain == 'Fibronectin':
+#             if '23.03.22' in WSIpath:
+#                 otsu_threshold -= 0.08
+#             elif '00.58.23' in WSIpath:
+#                 otsu_threshold += 0.02
+#             else:
+#                 otsu_threshold += 0.05
+
+#         elif stain == 'Meca-32':
+#             divideSizeFactor = 50
+
+#             if '1150-12' in WSIpath:
+#                 otsu_threshold -= 0.097
+#             elif '22.36.35' in WSIpath:
+#                 otsu_threshold -= 0.065
+#             elif '10.23.46' in WSIpath:
+#                 otsu_threshold += 0.05
+#             else:
+#                 otsu_threshold += 0.02
+
+#         elif stain == 'CD44':
+#             if '11.22.14' in WSIpath or '11.28.21' in WSIpath:
+#                 otsu_threshold += 0.085
+#             elif '11.41.12' in WSIpath:
+#                 otsu_threshold -= 0.06
+#             else:
+#                 otsu_threshold += 0.015
+
+
+
+
+#         img_mask = img < otsu_threshold
+#         # img_mask = img < 0.78
+#         logger.info('Utilized threshold: ' + str(otsu_threshold))
+
+#         if stain == 'NGAL' and '18.58.25' in WSIpath:
+#             img_mask[395:405,440:530] = 0
+
+#         # extract connected regions only with at least 1/25 size of WSI
+#         labeledRegions, numberRegions = label(img_mask, struc3)
+#         minRequiredSize = (img_mask.shape[0] * img_mask.shape[1]) // divideSizeFactor
+
+#         argRegions = []
+#         for i in range(1, numberRegions + 1):
+#             if (labeledRegions == i).sum() > minRequiredSize:
+#                 argRegions.append(i)
+
+#         finalWSI_FG = np.zeros(img_mask.shape, dtype=np.bool)
+
+#         # process these regions
+#         for i, arg in enumerate(argRegions):
+#             logger.info('Extract foreground region ' + str(i + 1) + '...')
+#             detectedRegion = labeledRegions == arg
+#             detectedRegion = binary_fill_holes(detectedRegion)
+#             detectedRegion = binary_opening(detectedRegion, structure=struc7)
+
+#             # extract biggest component
+#             labeledRegions2, numberLabeledRegions = label(detectedRegion, struc3)
+#             if numberLabeledRegions > 1:
+#                 argMax = np.array([region.area for region in regionprops(labeledRegions2)]).argmax() + 1
+#                 detectedRegion = labeledRegions2 == argMax
+
+#             # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+#             # Save foreground segmentation (on chosen coarse resolution level) as overlay
+#             if saveWSICoarseForegroundSegmResults_RegionSeparate:
+#                 saveOverlayResults(imgRGB, detectedRegion, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg.png')
+
+#             finalWSI_FG = np.logical_or(finalWSI_FG, detectedRegion)
+
+#             logger.info('Foreground segmentation done on coarse level...')
+
+#             if onlyPerformForegroundSegmentation:
+#                 continue
+
+#             # enlargement of foreground in order to fully cover border structures
+#             # detectedRegion = binary_erosion(detectedRegion, structure=struc3)
+
+#             # compute bounding box
+#             temp = np.where(detectedRegion == 1)
+
+#             bbox = np.array([np.min(temp[0]), np.min(temp[1]), np.max(temp[0]), np.max(temp[1])])
+
+#             # compute how much to enlarge bbox to consider wider context utilization (especially for patchify)
+#             downsampleFactor = int(levelDownsamples[usedLevel])
+#             shift = round((shiftUM / spacings[0]) / downsampleFactor)
+
+#             # enlarge bounding box due to wider context consideration
+#             bbox[0] = max(bbox[0] - shift, 0)
+#             bbox[1] = max(bbox[1] - shift, 0)
+#             bbox[2] = min(bbox[2] + shift, detectedRegion.shape[0] - 1)
+#             bbox[3] = min(bbox[3] + shift, detectedRegion.shape[1] - 1)
+
+
+#             bbox_WSI = np.asarray(bbox * downsampleFactor, np.int)
+#             logger.info('High res bounding box coordinates: ' + str(bbox_WSI))
+
+#             logger.info('Extract high res patch and segm map...')
+#             try:
+#                 img_WSI = np.array(slide.read_region(location=np.array([bbox_WSI[1], bbox_WSI[0]]), level=0, size=np.array([bbox_WSI[3] - bbox_WSI[1] + downsampleFactor, bbox_WSI[2] - bbox_WSI[0] + downsampleFactor])))[:, :, :3]
+#             except OpenSlideError:
+#                 logger.info('############ FILE CORRUPTED - IGNORED ############')
+#                 continue
+
+#             segMap = zoom(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0)
+#             # segMap = rescale(detectedRegion[bbox[0]:bbox[2] + 1, bbox[1]:bbox[3] + 1], downsampleFactor, order=0, preserve_range=True, multichannel=False)
+#             assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale lead to unequal resolutions..."
+#             logger.info('Done - size of extracted high res patch: ' + str(img_WSI.shape))
+
+#             downsamplingFactor = spacings[0] / targetSpacing  # Rescaling very slow!
+#             logger.info('Spacing of slide: '+str(spacings[0])+', Resample both patches using factor: ' + str(downsamplingFactor))
+#             img_WSI = np.asarray(np.round(rescale(img_WSI, downsamplingFactor, order=1, preserve_range=True, multichannel=True)), np.uint8)
+#             segMap = np.asarray(zoom(segMap, downsamplingFactor, order=0), np.bool)
+#             # segMap = np.asarray(np.round(rescale(segMap, downsamplingFactor, order=0, preserve_range=True, multichannel=False)), np.bool)
+#             assert img_WSI.shape[:2] == segMap.shape, "Error: Upsampling via Zoom/Rescale lead to unequal resolutions..."
+#             logger.info('Done - size of extracted resampled high res patch: ' + str(img_WSI.shape))
+
+#             # Save cropped foreground segmentation result as overlay
+#             if saveWSICroppedForegroundSegmResults:
+#                 saveOverlayResults(img_WSI, segMap, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] +'_'+str(i + 1)+'_fgSeg2.png')
+
+
+#             ##### FOREGROUND SEGMENTATION DONE - NOW PATCH EXTRACTION USING PATCHIFY #####
+#             logger.info('Perform patch extraction...')
+
+#             extractedPatches = patchify(img_WSI.copy(), patch_size=(imageResolution, imageResolution, 3), step=int(imageResolution*strideProportion))  # shape: (5, 7, 1, 640, 640, 3)
+#             extractedSegm = patchify(segMap.copy(), patch_size=(imageResolution, imageResolution), step=int(imageResolution*strideProportion))  # shape: (5, 7, 640, 640)
+
+#             resultsLabel2 = Parallel(n_jobs=num_cores)(delayed(writeOutPatchesRowwise)(x) for x in range(extractedPatches.shape[0]))
+
+#             # for x in range(extractedPatches.shape[0]):
+#             #     for y in range(extractedPatches.shape[1]):
+#             #         if extractedSegm[x,y].sum() > (minFGproportion * imageResolution * imageResolution):
+#             #             Image.fromarray(extractedPatches[x,y,0]).save(resultsPath + '/' + WSIpath[:suffixCut] +'_'+str(x)+'_'+str(y)+'_'+str(i + 1)+'.png')
+
+#             logger.info('Done.')
+
+#         if saveWSICoarseForegroundSegmResults:
+#             saveOverlayResults(imgRGB, finalWSI_FG, alpha=alpha, figHeight=figHeight, fullResultPath=resultsForegroundSegmentation + '/' + WSIpath[:suffixCut] + '_fgSeg_WSI.png')
+
+#         detectedRegions.append(len(argRegions))
+#         logger.info('####################')
+
+#     logger.info('Detected regions of all processed slides:')
+#     logger.info(detectedRegions)
+
+
+
+# except:
+#     logger.exception('! Exception !')
+#     raise
+
+# log.info('%%%% Ended regularly ! %%%%')
+
diff --git a/show_npy.ipynb b/show_npy.ipynb
new file mode 100644
index 0000000..3566da2
--- /dev/null
+++ b/show_npy.ipynb
@@ -0,0 +1,176 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import os\n",
+    "from PIL import Image\n",
+    "import matplotlib.pylab as plt\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(34314, 31734)\n"
+     ]
+    }
+   ],
+   "source": [
+    "WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/npyFiles/'#'SPECIFIED WSI FOLDER'\n",
+    "# print(os.listdir(WSIrootFolder))\n",
+    "finalInstance = np.load(WSIrootFolder+f'p21_kidleftp456_B10_PAS_3_finalInstancePrediction.npy')\n",
+    "print(finalInstance.shape)\n",
+    "# resultWSI = np.load(WSIrootFolder+f'p21_kidleftp456_B10_PAS_3_resultWSI.npy')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "51649\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(finalInstance.max())\n",
+    "c = np.unique(finalInstance==3, return_index=True)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(array([False,  True]), array([       0, 49018927]))\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(c)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[[False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " ...\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]\n",
+      " [False False False ... False False False]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "\n",
+    "# resized = np.resize(finalInstance, (1000, 1000))\n",
+    "# print(finalInstance==3)\n",
+    "channel = finalInstance == (1)\n",
+    "resized = np.resize(channel, (1000, 1000))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.image.AxesImage at 0x7f4fc41bab50>"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaIAAAGiCAYAAAClC8JvAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy89olMNAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAsQklEQVR4nO3de3TUdX7/8VcSmAGESbglIUtAEAUjAcotTFUqJSVAtKB4CkjdgIiFJhwhiiErC2h7Tix0u+Jyq8dTY89Z5NIjsIKEpsGEKuEWiRCQVNhosDABwWQgQq6f3x/+8i2zRJdA4EOW5+Oc79nM9/P+fr+f91zy2sl8BoOMMUYAAFgSbHsCAIC7G0EEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCqxQfRqlWrdO+996pNmzaKi4vT/v37bU8JANAELTqINmzYoNTUVC1ZskSfffaZBg4cqISEBJ09e9b21AAA1ymoJf+jp3FxcRo2bJhWrlwpSaqvr1d0dLTmzp2rhQsXWp4dAOB6tLI9gRtVXV2tgoICpaenO/uCg4MVHx+v/Pz8Ro+pqqpSVVWVc7u+vl4XLlxQ586dFRQUdMvnDAB/yowxunjxoqKiohQcfP1/cGuxQfTtt9+qrq5OERERAfsjIiJ0/PjxRo/JyMjQa6+9djumBwB3rVOnTql79+7XXd9ig+hGpKenKzU11bldUVGhHj166BGNVyu1tjgzAGj5alWjT/SROnTo0KTjWmwQdenSRSEhISorKwvYX1ZWpsjIyEaPcbvdcrvd1+xvpdZqFUQQAcBN+f8rDpr6UUeLXTXncrk0ZMgQ5eTkOPvq6+uVk5Mjr9drcWYAgKZose+IJCk1NVVJSUkaOnSohg8frjfffFOVlZWaMWOG7akBAK5Tiw6iyZMn69y5c1q8eLF8Pp8GDRqkrKysaxYwAADuXC36e0Q3y+/3KzQ0VI9pAp8RAcBNqjU1ytVWVVRUyOPxXPdxLfYzIgDAnwaCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCq2YNo6dKlCgoKCtj69evnjF+5ckXJycnq3Lmz2rdvr0mTJqmsrCzgHKWlpUpMTFS7du0UHh6uBQsWqLa2NqAmNzdXgwcPltvtVp8+fZSZmdncrQAAboNb8o7ooYce0pkzZ5ztk08+ccbmz5+vDz/8UJs2bVJeXp5Onz6tp556yhmvq6tTYmKiqqurtWfPHr333nvKzMzU4sWLnZqSkhIlJiZq1KhRKiws1Lx58/T8889r586dt6IdAMAtFGSMMc15wqVLl2rLli0qLCy8ZqyiokJdu3bVunXr9PTTT0uSjh8/rgcffFD5+fkaMWKEduzYoccff1ynT59WRESEJGnt2rVKS0vTuXPn5HK5lJaWpu3bt6uoqMg595QpU1ReXq6srKzrnqvf71doaKge0wS1Cmp9c40DwF2u1tQoV1tVUVEhj8dz3cfdkndEX375paKiotS7d29NmzZNpaWlkqSCggLV1NQoPj7eqe3Xr5969Oih/Px8SVJ+fr5iY2OdEJKkhIQE+f1+HT161Km5+hwNNQ3n+DFVVVXy+/0BGwDArmYPori4OGVmZiorK0tr1qxRSUmJHn30UV28eFE+n08ul0thYWEBx0RERMjn80mSfD5fQAg1jDeM/VSN3+/X5cuXf3RuGRkZCg0Ndbbo6OibbRcAcJNaNfcJx40b5/w8YMAAxcXFqWfPntq4caPatm3b3JdrkvT0dKWmpjq3/X4/YQQAlt3y5dthYWF64IEHdOLECUVGRqq6ulrl5eUBNWVlZYqMjJQkRUZGXrOKruH2H6vxeDw/GXZut1sejydgAwDYdcuD6NKlSzp58qS6deumIUOGqHXr1srJyXHGi4uLVVpaKq/XK0nyer06cuSIzp4969RkZ2fL4/EoJibGqbn6HA01DecAALQczR5EL7/8svLy8vTVV19pz549evLJJxUSEqKpU6cqNDRUM2fOVGpqqj7++GMVFBRoxowZ8nq9GjFihCRpzJgxiomJ0bPPPqvPP/9cO3fu1KJFi5ScnCy32y1Jmj17tn7/+9/rlVde0fHjx7V69Wpt3LhR8+fPb+52AAC3WLN/RvTNN99o6tSpOn/+vLp27apHHnlEe/fuVdeuXSVJv/71rxUcHKxJkyapqqpKCQkJWr16tXN8SEiItm3bpjlz5sjr9eqee+5RUlKSXn/9daemV69e2r59u+bPn68VK1aoe/fueuedd5SQkNDc7QAAbrFm/x5RS8L3iACg+dxR3yMCAOB6EUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVU0Oot27d+uJJ55QVFSUgoKCtGXLloBxY4wWL16sbt26qW3btoqPj9eXX34ZUHPhwgVNmzZNHo9HYWFhmjlzpi5duhRQc/jwYT366KNq06aNoqOjtWzZsmvmsmnTJvXr109t2rRRbGysPvroo6a2AwCwrMlBVFlZqYEDB2rVqlWNji9btkxvvfWW1q5dq3379umee+5RQkKCrly54tRMmzZNR48eVXZ2trZt26bdu3frhRdecMb9fr/GjBmjnj17qqCgQMuXL9fSpUv19ttvOzV79uzR1KlTNXPmTB06dEgTJ07UxIkTVVRU1NSWAAAWBRljzA0fHBSkzZs3a+LEiZJ+eDcUFRWll156SS+//LIkqaKiQhEREcrMzNSUKVP0xRdfKCYmRgcOHNDQoUMlSVlZWRo/fry++eYbRUVFac2aNXr11Vfl8/nkcrkkSQsXLtSWLVt0/PhxSdLkyZNVWVmpbdu2OfMZMWKEBg0apLVr1zY636qqKlVVVTm3/X6/oqOj9ZgmqFVQ6xu9GwAAkmpNjXK1VRUVFfJ4PNd9XLN+RlRSUiKfz6f4+HhnX2hoqOLi4pSfny9Jys/PV1hYmBNCkhQfH6/g4GDt27fPqRk5cqQTQpKUkJCg4uJifffdd07N1ddpqGm4TmMyMjIUGhrqbNHR0TffNADgpjRrEPl8PklSREREwP6IiAhnzOfzKTw8PGC8Va
tW6tSpU0BNY+e4+ho/VtMw3pj09HRVVFQ426lTp5raIgCgmbWyPYHbye12y+12254GAOAqzfqOKDIyUpJUVlYWsL+srMwZi4yM1NmzZwPGa2trdeHChYCaxs5x9TV+rKZhHADQMjRrEPXq1UuRkZHKyclx9vn9fu3bt09er1eS5PV6VV5eroKCAqdm165dqq+vV1xcnFOze/du1dTUODXZ2dnq27evOnbs6NRcfZ2GmobrAABahiYH0aVLl1RYWKjCwkJJPyxQKCwsVGlpqYKCgjRv3jz94z/+o373u9/pyJEj+vnPf66oqChnZd2DDz6osWPHatasWdq/f78+/fRTpaSkaMqUKYqKipIkPfPMM3K5XJo5c6aOHj2qDRs2aMWKFUpNTXXm8eKLLyorK0u/+tWvdPz4cS1dulQHDx5USkrKzd8rAIDbpsnLt3NzczVq1Khr9iclJSkzM1PGGC1ZskRvv/22ysvL9cgjj2j16tV64IEHnNoLFy4oJSVFH374oYKDgzVp0iS99dZbat++vVNz+PBhJScn68CBA+rSpYvmzp2rtLS0gGtu2rRJixYt0ldffaX7779fy5Yt0/jx46+7F7/fr9DQUJZvA0AzuNHl2zf1PaKWjiACgOZzR3yPCACApiKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKuaHES7d+/WE088oaioKAUFBWnLli0B49OnT1dQUFDANnbs2ICaCxcuaNq0afJ4PAoLC9PMmTN16dKlgJrDhw/r0UcfVZs2bRQdHa1ly5ZdM5dNmzapX79+atOmjWJjY/XRRx81tR0AgGVNDqLKykoNHDhQq1at+tGasWPH6syZM872/vvvB4xPmzZNR48eVXZ2trZt26bdu3frhRdecMb9fr/GjBmjnj17qqCgQMuXL9fSpUv19ttvOzV79uzR1KlTNXPmTB06dEgTJ07UxIkTVVRU1NSWAAAWBRljzA0fHBSkzZs3a+LEic6+6dOnq7y8/Jp3Sg2++OILxcTE6MCBAxo6dKgkKSsrS+PHj9c333yjqKgorVmzRq+++qp8Pp9cLpckaeHChdqyZYuOHz8uSZo8ebIqKyu1bds259wjRozQoEGDtHbt2kavXVVVpaqqKue23+9XdHS0HtMEtQpqfaN3AwBAUq2pUa62qqKiQh6P57qPuyWfEeXm5io8PFx9+/bVnDlzdP78eWcsPz9fYWFhTghJUnx8vIKDg7Vv3z6nZuTIkU4ISVJCQoKKi4v13XffOTXx8fEB101ISFB+fv6PzisjI0OhoaHOFh0d3Sz9AgBuXLMH0dixY/Xv//7vysnJ0T/90z8pLy9P48aNU11dnSTJ5/MpPDw84JhWrVqpU6dO8vl8Tk1ERERATcPtP1bTMN6Y9PR0VVRUONupU6durlkAwE1r1dwnnDJlivNzbGysBgwYoPvuu0+5ubkaPXp0c1+uSdxut9xut9U5AAAC3fLl271791aXLl104sQJSVJkZKTOnj0bUFNbW6sLFy4oMjLSqSkrKwuoabj9x2oaxgEALcMtD6JvvvlG58+fV7du3SRJXq9X5eXlKigocGp27dql+vp6xcXFOTW7d+9WTU2NU5Odna2+ffuqY8eOTk1OTk7AtbKzs+X1em91SwCAZtTkILp06ZIKCwtVWFgoSSopKVFhYaFKS0t16dIlLViwQHv37tVXX32lnJwcTZgwQX369FFCQoIk6cEHH9TYsWM1a9Ys7d+/X59++qlSUlI0ZcoURUVFSZKeeeYZuVwuzZw5U0ePHtWGDRu0YsUKpaamOvN48cUXlZWVpV/96lc6fvy4li5dqoMHDyolJaUZ7hYAwO3S5OXbubm5GjVq1DX7k5KStGbNGk2cOFGHDh1SeXm5oqKiNGbMGP3DP/xDwMKCCxcuKCUlRR9++KGCg4M1adIkvfXWW2rfvr1Tc/jwYSUnJ+vAgQPq0qWL5s6dq7S0tIBrbtq0SYsWLdJXX32l+++/X8uWLdP48eOvuxe/36/Q0FCWbwNAM7jR5ds39T2ilo4gAoDmc0d9jwgAgOtFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArCKIAABWEUQAAKsIIgCAVQQRAMAqgggAYBVBBACwiiACAFhFEAEArGpSEGVkZGjYsGHq0KGDwsPDNXHiRBUXFwfUXLlyRcnJyercubPat2+vSZMmqaysLKCmtLRUiYmJateuncLDw7VgwQLV1tYG1OTm5mrw4MFyu93q06ePMjMzr5nPqlWrdO+996pNmzaKi4vT/v37m9IOAOAO0KQgysvLU3Jysvbu3avs7GzV1NRozJgxqqysdGrmz5+vDz/8UJs2bVJeXp5Onz6tp556yhmvq6tTYmKiqqurtWfPHr333nvKzMzU4sWLnZqSkhIlJiZq1KhRKiws1Lx58/T8889r586dTs2GDRuUmpqqJUuW6LPPPtPAgQOVkJCgs2fP3sz9AQC4zYKMMeZGDz537pzCw8OVl5enkSNHqqKiQl27dtW6dev09NNPS5KOHz+uBx98UPn5+RoxYoR27Nihxx9/XKdPn1ZERIQkae3atUpLS9O5c+fkcrmUlpam7du3q6ioyLnWlClTVF5erqysLElSXFychg0bppUrV0qS6uvrFR0drblz52rhwoXXNX+/36/Q0FA9pglqFdT6Ru8GAICkWlOjXG1VRUWFPB7PdR93U58RVVRUSJI6deokSSooKFBNTY3i4+Odmn79+qlHjx7Kz8+XJOXn5ys2NtYJIUlKSEiQ3+/X0aNHnZqrz9FQ03CO6upqFRQUBNQEBwcrPj7eqWlMVVWV/H5/wAYAsOuGg6i+vl7z5s3Tww8/rP79+0uSfD6fXC6XwsLCAmojIiLk8/mcmqtDqGG8Yeynavx+vy5fvqxvv/1WdXV1jdY0nKMxGRkZCg0Ndbbo6OimNw4AaFY3HETJyckqKirS+vXrm3M+t1R6eroqKiqc7dSpU7anBAB3vVY3clBKSoq2bdum3bt3q3v37s7+yMhIVVdXq7y8POBdUVlZmSIjI52aP1zd1rCq7uqaP1xpV1ZWJo/Ho7Zt2yokJEQhISGN1jScozFut1tut7vpDQMAbpkmvSMyxiglJUWbN2/Wrl271KtXr
4DxIUOGqHXr1srJyXH2FRcXq7S0VF6vV5Lk9Xp15MiRgNVt2dnZ8ng8iomJcWquPkdDTcM5XC6XhgwZElBTX1+vnJwcpwYA0DI06R1RcnKy1q1bp61bt6pDhw7O5zGhoaFq27atQkNDNXPmTKWmpqpTp07yeDyaO3euvF6vRowYIUkaM2aMYmJi9Oyzz2rZsmXy+XxatGiRkpOTnXcrs2fP1sqVK/XKK6/oueee065du7Rx40Zt377dmUtqaqqSkpI0dOhQDR8+XG+++aYqKys1Y8aM5rpvAAC3QZOWbwcFBTW6/91339X06dMl/fCF1pdeeknvv/++qqqqlJCQoNWrVwf8yezrr7/WnDlzlJubq3vuuUdJSUl644031KrV/+Vibm6u5s+fr2PHjql79+765S9/6VyjwcqVK7V8+XL5fD4NGjRIb731luLi4q67eZZvA0DzudHl2zf1PaKWjiACgOZj5XtEAADcLIIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsKpJQZSRkaFhw4apQ4cOCg8P18SJE1VcXBxQ89hjjykoKChgmz17dkBNaWmpEhMT1a5dO4WHh2vBggWqra0NqMnNzdXgwYPldrvVp08fZWZmXjOfVatW6d5771WbNm0UFxen/fv3N6UdAMAdoElBlJeXp+TkZO3du1fZ2dmqqanRmDFjVFlZGVA3a9YsnTlzxtmWLVvmjNXV1SkxMVHV1dXas2eP3nvvPWVmZmrx4sVOTUlJiRITEzVq1CgVFhZq3rx5ev7557Vz506nZsOGDUpNTdWSJUv02WefaeDAgUpISNDZs2dv9L4AAFgQZIwxN3rwuXPnFB4erry8PI0cOVLSD++IBg0apDfffLPRY3bs2KHHH39cp0+fVkREhCRp7dq1SktL07lz5+RyuZSWlqbt27erqKjIOW7KlCkqLy9XVlaWJCkuLk7Dhg3TypUrJUn19fWKjo7W3LlztXDhwuuav9/vV2hoqB7TBLUKan2jdwMAQFKtqVGutqqiokIej+e6j7upz4gqKiokSZ06dQrY/9vf/lZdunRR//79lZ6eru+//94Zy8/PV2xsrBNCkpSQkCC/36+jR486NfHx8QHnTEhIUH5+viSpurpaBQUFATXBwcGKj493ahpTVVUlv98fsAEA7Gp1owfW19dr3rx5evjhh9W/f39n/zPPPKOePXsqKipKhw8fVlpamoqLi/XBBx9Iknw+X0AISXJu+3y+n6zx+/26fPmyvvvuO9XV1TVac/z48R+dc0ZGhl577bUbbRkAcAvccBAlJyerqKhIn3zyScD+F154wfk5NjZW3bp10+jRo3Xy5Endd999Nz7TZpCenq7U1FTntt/vV3R0tMUZAQBuKIhSUlK0bds27d69W927d//J2ri4OEnSiRMndN999ykyMvKa1W1lZWWSpMjISOd/G/ZdXePxeNS2bVuFhIQoJCSk0ZqGczTG7XbL7XZfX5MAgNuiSZ8RGWOUkpKizZs3a9euXerVq9cfPaawsFCS1K1bN0mS1+vVkSNHAla3ZWdny+PxKCYmxqnJyckJOE92dra8Xq8kyeVyaciQIQE19fX1ysnJcWoAAC1Dk94RJScna926ddq6das6dOjgfKYTGhqqtm3b6uTJk1q3bp3Gjx+vzp076/Dhw5o/f75GjhypAQMGSJLGjBmjmJgYPfvss1q2bJl8Pp8WLVqk5ORk593K7NmztXLlSr3yyit67rnntGvXLm3cuFHbt2935pKamqqkpCQNHTpUw4cP15tvvqnKykrNmDGjue4bAMBt0KTl20FBQY3uf/fddzV9+nSdOnVKf/u3f6uioiJVVlYqOjpaTz75pBYtWhSwlO/rr7/WnDlzlJubq3vuuUdJSUl644031KrV/+Vibm6u5s+fr2PHjql79+765S9/qenTpwdcd+XKlVq+fLl8Pp8GDRqkt956y/lT4PVg+TYANJ8bXb59U98jaukIIgBoPla+RwQAwM0iiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVhFEAACrmhREa9as0YABA+TxeOTxeOT1erVjxw5n/MqVK0pOTlbnzp3Vvn17TZo0SWVlZQHnKC0tVWJiotq1a6fw8HAtWLBAtbW1ATW5ubkaPHiw3G63+vTpo8zMzGvmsmrVKt17771q06aN4uLitH///qa0AgC4QzQpiLp376433nhDBQUFOnjwoP7yL/9SEyZM0NGjRyVJ8+fP14cffqhNmzYpLy9Pp0+f1lNPPeUcX1dXp8TERFVXV2vPnj167733lJmZqcWLFzs1JSUlSkxM1KhRo1RYWKh58+bp+eef186dO52aDRs2KDU1VUuWLNFnn32mgQMHKiEhQWfPnr3Z+wMAcJsFGWPMzZygU6dOWr58uZ5++ml17dpV69at09NPPy1JOn78uB588EHl5+drxIgR2rFjhx5//HGdPn1aERERkqS1a9cqLS1N586dk8vlUlpamrZv366ioiLnGlOmTFF5ebmysrIkSXFxcRo2bJhWrlwpSaqvr1d0dLTmzp2rhQsXXvfc/X6/QkND9ZgmqFVQ65u5GwDgrldrapSrraqoqJDH47nu4274M6K6ujqtX79elZWV8nq9KigoUE1NjeLj452afv36qUePHsrPz5ck5efnKzY21gkhSUpISJDf73feVeXn5weco6Gm4RzV1dUqKCgIqAkODlZ8fLxT82Oqqqrk9/sDNgCAXU0OoiNHjqh9+/Zyu92aPXu2Nm/erJiYGPl8PrlcLoWFhQXUR0REyOfzSZJ8Pl9ACDWMN4z9VI3f79fly5f17bffqq6urtGahnP8mIyMDIWGhjpbdHR0U9sHADSzJgdR3759VVhYqH379mnOnDlKSkrSsWPHbsXcml16eroqKiqc7dSpU7anBAB3vVZNPcDlcqlPnz6SpCFDhujAgQNasWKFJk+erOrqapWXlwe8KyorK1NkZKQkKTIy8prVbQ2r6q6u+cOVdmVlZfJ4PGrbtq1CQkIUEhLSaE3DOX6M2+2W2+1uassAgFvopr9HVF9fr6qqKg0ZMkStW7dWTk6OM1ZcXKzS
0lJ5vV5Jktfr1ZEjRwJWt2VnZ8vj8SgmJsapufocDTUN53C5XBoyZEhATX19vXJycpwaAEDL0aR3ROnp6Ro3bpx69Oihixcvat26dcrNzdXOnTsVGhqqmTNnKjU1VZ06dZLH49HcuXPl9Xo1YsQISdKYMWMUExOjZ599VsuWLZPP59OiRYuUnJzsvFOZPXu2Vq5cqVdeeUXPPfecdu3apY0bN2r79u3OPFJTU5WUlKShQ4dq+PDhevPNN1VZWakZM2Y0410DALgdmhREZ8+e1c9//nOdOXNGoaGhGjBggHbu3Km/+qu/kiT9+te/VnBwsCZNmqSqqiolJCRo9erVzvEhISHatm2b5syZI6/Xq3vuuUdJSUl6/fXXnZpevXpp+/btmj9/vlasWKHu3bvrnXfeUUJCglMzefJknTt3TosXL5bP59OgQYOUlZV1zQIGAMCd76a/R9SS8T0iAGg+t/17RAAANAeCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFYRRAAAqwgiAIBVBBEAwCqCCABgFUEEALCKIAIAWEUQAQCsIogAAFa1sj0Bm4wxkqRa1UjG8mQAoIWrVY2k//vder3u6iA6f/68JOkTfWR5JgDwp+PixYsKDQ297vq7Oog6deokSSotLW3SnXYn8/v9io6O1qlTp+TxeGxPp9nQV8tCXy1Lc/VljNHFixcVFRXVpOPu6iAKDv7hI7LQ0NA/qSeVJHk8nj+5niT6amnoq2Vpjr5u5P/Us1gBAGAVQQQAsOquDiK3260lS5bI7Xbbnkqz+VPsSaKvloa+WhbbfQWZpq6zAwCgGd3V74gAAPYRRAAAqwgiAIBVBBEAwCqCCABg1V0bRKtWrdK9996rNm3aKC4uTvv377c9JcfSpUsVFBQUsPXr188Zv3LlipKTk9W5c2e1b99ekyZNUllZWcA5SktLlZiYqHbt2ik8PFwLFixQbW1tQE1ubq4GDx4st9utPn36KDMzs1n72L17t5544glFRUUpKChIW7ZsCRg3xmjx4sXq1q2b2rZtq/j4eH355ZcBNRcuXNC0adPk8XgUFhammTNn6tKlSwE1hw8f1qOPPqo2bdooOjpay5Ytu2YumzZtUr9+/dSmTRvFxsbqo49u/N8X/GN9TZ8+/ZrHb+zYsXd0XxkZGRo2bJg6dOig8PBwTZw4UcXFxQE1t/N511yvz+vp67HHHrvm8Zo9e/Yd3deaNWs0YMAA519C8Hq92rFjhzPe4h4rcxdav369cblc5t/+7d/M0aNHzaxZs0xYWJgpKyuzPTVjjDFLliwxDz30kDlz5oyznTt3zhmfPXu2iY6ONjk5OebgwYNmxIgR5s///M+d8draWtO/f38THx9vDh06ZD766CPTpUsXk56e7tT8/ve/N+3atTOpqanm2LFj5je/+Y0JCQkxWVlZzdbHRx99ZF599VXzwQcfGElm8+bNAeNvvPGGCQ0NNVu2bDGff/65+eu//mvTq1cvc/nyZadm7NixZuDAgWbv3r3mv//7v02fPn3M1KlTnfGKigoTERFhpk2bZoqKisz7779v2rZta/71X//Vqfn0009NSEiIWbZsmTl27JhZtGiRad26tTly5Mgt6SspKcmMHTs24PG7cOFCQM2d1ldCQoJ59913TVFRkSksLDTjx483PXr0MJcuXXJqbtfzrjlfn9fT11/8xV+YWbNmBTxeFRUVd3Rfv/vd78z27dvN//zP/5ji4mLzi1/8wrRu3doUFRUZY1reY3VXBtHw4cNNcnKyc7uurs5ERUWZjIwMi7P6P0uWLDEDBw5sdKy8vNy0bt3abNq0ydn3xRdfGEkmPz/fGPPDL8rg4GDj8/mcmjVr1hiPx2OqqqqMMca88sor5qGHHgo49+TJk01CQkIzd/ODP/yFXV9fbyIjI83y5cudfeXl5cbtdpv333/fGGPMsWPHjCRz4MABp2bHjh0mKCjI/O///q8xxpjVq1ebjh07On0ZY0xaWprp27evc/tv/uZvTGJiYsB84uLizN/93d81e1/G/BBEEyZM+NFjWkJfZ8+eNZJMXl6eMeb2Pu9u5evzD/sy5ocgevHFF3/0mJbQlzHGdOzY0bzzzjst8rG66/40V11drYKCAsXHxzv7goODFR8fr/z8fIszC/Tll18qKipKvXv31rRp01RaWipJKigoUE1NTcD8+/Xrpx49ejjzz8/PV2xsrCIiIpyahIQE+f1+HT161Km5+hwNNbfrPigpKZHP5wuYQ2hoqOLi4gL6CAsL09ChQ52a+Ph4BQcHa9++fU7NyJEj5XK5AvooLi7Wd99959Tc7l5zc3MVHh6uvn37as6cOc5/cqSl9FVRUSHp//6F+tv1vLvVr88/7KvBb3/7W3Xp0kX9+/dXenq6vv/+e2fsTu+rrq5O69evV2Vlpbxeb4t8rO66f33722+/VV1dXcADIEkRERE6fvy4pVkFiouLU2Zmpvr27aszZ87otdde06OPPqqioiL5fD65XC6FhYUFHBMRESGfzydJ8vl8jfbXMPZTNX6/X5cvX1bbtm1vUXcKmEdjc7h6juHh4QHjrVq1UqdOnQJqevXqdc05GsY6duz4o702nKO5jR07Vk899ZR69eqlkydP6he/+IXGjRun/Px8hYSE3PF91dfXa968eXr44YfVv39/55q343n33Xff3bLXZ2N9SdIzzzyjnj17KioqSocPH1ZaWpqKi4v1wQcf3NF9HTlyRF6vV1euXFH79u21efNmxcTEqLCwsMU9VnddELUE48aNc34eMGCA4uLi1LNnT23cuPGWBwRu3pQpU5yfY2NjNWDAAN13333Kzc3V6NGjLc7s+iQnJ6uoqEiffPKJ7ak0qx/r64UXXnB+jo2NVbdu3TR69GidPHlS99133+2e5nXr27evCgsLVVFRof/4j/9QUlKS8vLybE/rhtx1f5rr0qWLQkJCrllBUlZWpsjISEuz+mlhYWF64IEHdOLECUVGRqq6ulrl5eUBNVfPPzIystH+GsZ+qsbj8dyWsGuYx089DpGRkTp79mzAeG1trS5cuNAsvd6ux7t3797q0qWLTpw44cznTu0rJSVF27Zt08cff6zu3bs7+2/X8+5WvT5/rK/GxMXFSVLA43Un9uVyudSnTx8NGTJEGRkZGjhwoFasWNEiH6u7LohcLpeGDBminJwcZ199fb1ycnLk9XotzuzHXbp0SSdPnlS3bt00ZMgQtW7dOmD+xcXFKi0tdebv9Xp15MiRgF922dnZ8ng8iomJcWquPkdDze26D3r16qXIyMiAOfj9fu3bty+gj/LychUUFDg1u3btUn19vfPLwuv1avfu3aqpqQnoo2/fvurYsaNTY7PXb775RufPn1e3bt2c+dxpfRljlJKSos2bN2v
Xrl3X/Fnwdj3vmvv1+cf6akxhYaEkBTxed1pfjamvr1dVVVXLfKyatLThT8T69euN2+02mZmZ5tixY+aFF14wYWFhAStIbHrppZdMbm6uKSkpMZ9++qmJj483Xbp0MWfPnjXG/LA0s0ePHmbXrl3m4MGDxuv1Gq/X6xzfsDRzzJgxprCw0GRlZZmuXbs2ujRzwYIF5osvvjCrVq1q9uXbFy9eNIcOHTKHDh0yksy//Mu/mEOHDpmvv/7aGPPD8u2wsDCzdetWc/jwYTNhwoRGl2//2Z/9mdm3b5/55JNPzP333x+wzLm8vNxERESYZ5991hQVFZn169ebdu3aXbPMuVWrVuaf//mfzRdffGGWLFlyU8u3f6qvixcvmpdfftnk5+ebkpIS81//9V9m8ODB5v777zdXrly5Y/uaM2eOCQ0NNbm5uQHLmL///nun5nY975rz9fnH+jpx4oR5/fXXzcGDB01JSYnZunWr6d27txk5cuQd3dfChQtNXl6eKSkpMYcPHzYLFy40QUFB5j//8z+NMS3vsborg8gYY37zm9+YHj16GJfLZYYPH2727t1re0qOyZMnm27duhmXy2V+9rOfmcmTJ5sTJ04445cvXzZ///d/bzp27GjatWtnnnzySXPmzJmAc3z11Vdm3Lhxpm3btqZLly7mpZdeMjU1NQE1H3/8sRk0aJBxuVymd+/e5t13323WPj7++GMj6ZotKSnJGPPDEu5f/vKXJiIiwrjdbjN69GhTXFwccI7z58+bqVOnmvbt2xuPx2NmzJhhLl68GFDz+eefm0ceecS43W7zs5/9zLzxxhvXzGXjxo3mgQceMC6Xyzz00ENm+/btt6Sv77//3owZM8Z07drVtG7d2vTs2dPMmjXrmhfmndZXY/1ICnhO3M7nXXO9Pv9YX6WlpWbkyJGmU6dOxu12mz59+pgFCxYEfI/oTuzrueeeMz179jQul8t07drVjB492gkhY1reY8V/jwgAYNVd9xkRAODOQhABAKwiiAAAVhFEAACrCCIAgFUEEQDAKoIIAGAVQQQAsIogAgBYRRABAKwiiAAAVv0/Rgd5CariuE8AAAAASUVORK5CYII=",
+      "text/plain": [
+       "<Figure size 640x480 with 1 Axes>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "import matplotlib.pylab as plt\n",
+    "\n",
+    "plt.imshow(channel)\n",
+    "# print(resized)\n",
+    "# plt.imshow([finalInstance==3])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pylab as plt\n",
+    "\n",
+    "plt.imshow(finalInstance==4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "torch",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.13"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/show_npy.py b/show_npy.py
new file mode 100644
index 0000000..055fa5c
--- /dev/null
+++ b/show_npy.py
@@ -0,0 +1,14 @@
+import numpy as np
+import os
+from PIL import Image
+import matplotlib.pylab as plt
+
+WSIrootFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/npyFiles/'  # 'SPECIFIED WSI FOLDER'
+# print(os.listdir(WSIrootFolder))
+finalInstance = np.load(WSIrootFolder + 'p21_kidleftp456_B10_PAS_3_finalInstancePrediction.npy')
+# resultWSI = np.load(WSIrootFolder + 'p21_kidleftp456_B10_PAS_3_resultWSI.npy')
+
+# resized = np.resize(finalInstance, (1000, 1000))
+print(finalInstance == 3)
+channel = finalInstance == 3  # binary mask of pixels predicted as class 3
+resized = np.resize(channel, (1000, 1000))  # note: np.resize tiles/truncates, it does not rescale the mask
\ No newline at end of file
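The script above leaves the mask handling half-finished: Pillow and pyplot are imported but never used, and np.resize only repeats or truncates the flattened array rather than rescaling the image. A minimal sketch of how the class-3 mask could actually be downscaled and previewed, reusing the path and class index from show_npy.py (using Pillow's Image.resize purely as an assumed-acceptable quick preview, not as part of the patch):

    import numpy as np
    import matplotlib.pyplot as plt
    from PIL import Image

    npyFolder = '/homeStor1/ylan/data/MarkusRinschen/debug_2/npyFiles/'
    finalInstance = np.load(npyFolder + 'p21_kidleftp456_B10_PAS_3_finalInstancePrediction.npy')

    # binary mask for label 3, scaled to 0/255 so Pillow treats it as an 8-bit image
    channel = ((finalInstance == 3) * 255).astype(np.uint8)
    # nearest-neighbour keeps the mask binary while producing a true 1000x1000 thumbnail
    thumbnail = Image.fromarray(channel).resize((1000, 1000), resample=Image.NEAREST)

    plt.imshow(thumbnail, cmap='gray')
    plt.axis('off')
    plt.show()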
diff --git a/training.py b/training.py
index ccb6d82..149c934 100644
--- a/training.py
+++ b/training.py
@@ -332,7 +332,7 @@ def set_up_training(modelString, setting, epochs, batchSize, lrate, weightDecay,
     tbWriter = SummaryWriter(log_dir=tensorboardPath)
 
     if modelString == 'custom':
-        model = Custom(input_ch=3, output_ch=8, modelDim=2)
+        model = Custom(input_ch=3, output_ch=8, modelDim=2) # one channel per class
     elif modelString == 'nnunet':
         model = Generic_UNet(input_channels=3, num_classes=8, base_num_features=30, num_pool=7, final_nonlin = None, deep_supervision=False, dropout_op_kwargs = {'p': 0.0, 'inplace': True})
     else:
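The added comment refers to the network emitting eight output channels, i.e. one logit map per class. A hedged sketch of how such channels are typically collapsed into a per-pixel labelmap at inference time (model and batch are illustrative names, not taken from this repository's prediction code):

    import torch

    with torch.no_grad():
        logits = model(batch)                  # shape (N, 8, H, W): one channel per class
        probs = torch.softmax(logits, dim=1)   # per-pixel class probabilities
        labelmap = torch.argmax(probs, dim=1)  # shape (N, H, W), integer labels 0..7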
diff --git a/utils.py b/utils.py
index 7ba1996..53e6972 100644
--- a/utils.py
+++ b/utils.py
@@ -18,6 +18,8 @@ import torch.nn as nn
 from scipy.ndimage.measurements import label
 from scipy.ndimage.morphology import binary_dilation, binary_fill_holes
 
+from PIL import Image
+
 
 colors = torch.tensor([[  0,   0,   0], # Black
                        [255,   0,   0], # Red
@@ -42,7 +44,7 @@ def getBoundingBox(img):
 
 # Generates a 2d ball of a specified radius representing a structuring element for morphological operations
 def generate_ball(radius):
-    structure = np.zeros((3, 3), dtype=np.int)
+    structure = np.zeros((3, 3), dtype=int)
     structure[1, :] = 1
     structure[:, 1] = 1
 
@@ -50,7 +52,7 @@ def generate_ball(radius):
     ball[radius, radius] = 1
     for i in range(radius):
         ball = binary_dilation(ball, structure=structure)
-    return np.asarray(ball, dtype=np.int)
+    return np.asarray(ball, dtype=int)
 
 
 def convert_labelmap_to_rgb(labelmap):
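The dtype change above is needed on recent NumPy: the np.int alias was deprecated in NumPy 1.20 and removed in 1.24, so the builtin int (NumPy's default integer type) takes its place. A small check of the structuring element this function builds, mirroring the lines visible in the hunks and assuming the ball array is allocated as (2*radius+1, 2*radius+1) (the allocation line sits outside the hunk):

    import numpy as np
    from scipy.ndimage import binary_dilation

    # 3x3 cross used as the dilation kernel, now with the builtin int dtype
    structure = np.zeros((3, 3), dtype=int)
    structure[1, :] = 1
    structure[:, 1] = 1

    # growing a single centre pixel twice yields the radius-2 "ball":
    # a diamond (city-block) structuring element inside a 5x5 array
    seed = np.zeros((5, 5), dtype=int)
    seed[2, 2] = 1
    ball = binary_dilation(binary_dilation(seed, structure=structure), structure=structure)
    print(np.asarray(ball, dtype=int))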
@@ -187,6 +189,7 @@ def savePredictionResultsWithoutDilation(prediction, fullResultPath, figSize):
 
 # Visualizes prediction and image overlay after dilating tubules
 def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha=0.4):
+    
     predictionMask = np.ma.masked_where(prediction == 0, prediction)
 
     colorMap = np.array([[0, 0, 0],  # Black
@@ -200,7 +203,38 @@ def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha
                          [128, 0, 128],  # Purple
                          [255, 140, 0],  # Orange
                          [255, 255, 255]], dtype=np.uint8)  # White
+    
+
+    
+    
+    # pil_img = Image.fromarray(img.transpose(1,2,0)).convert('L')
+
+    # # print(pil_mask.size)
+    # overlaid_image = Image.new('RGBA', pil_img.size)
+    # for i in range(prediction.shape[0]):
+    #     pil_mask = Image.fromarray(prediction[i]).convert('L')
+    #     pil_mask = pil_mask.point(lambda p: 255 if p<0 else 0)
+
+    #     color = colorMap[i]
+    #     overlaid_image = overlaid_image.composite(pil_img, pil_mask, color)
+
+    # overlaid_image.save('fullResultPath')
+
+    # if prediction.shape[0] == 8:
+    #     prediction = prediction.transpose(1,2,0)
+
+    # overlayed_image = Image.fromarray(img)
+    # print(prediction.shape)
+    # for i, color in enumerate(colorMap):
+    #     class_mask = prediction == i
+    #     # class_mask = prediction[:, :, i] > 0
+    #     class_mask_rgb = np.zeros_like(class_mask, dtype=np.uint8)
+    #     class_color = colorMap[i]
+    #     overlayed_image.paste(Image.new('RGB', overlayed_image.size, tuple(class_color)), mask=Image.fromarray(class_mask))
 
+    # overlayed_image.save(fullResultPath)
+
+    #######################################################
     newRandomColors = np.random.randint(low=0, high=256, dtype=np.uint8, size=(prediction.max(), 3))
     colorMap = np.concatenate((colorMap, newRandomColors))
     colorMap = colorMap / 255.
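The commented-out block above sketches a Pillow-based overlay (paste one solid colour per class through its mask) but was left incomplete: np.zeros_like is missing its argument, Image.composite is used as an instance method, and boolean masks are passed to Image.fromarray directly. A hypothetical, self-contained version of that idea, assuming img is an (H, W, 3) uint8 RGB array and prediction an (H, W) integer labelmap; the patch itself keeps the matplotlib code path below, so this is only a sketch of the abandoned alternative:

    from PIL import Image
    import numpy as np

    def overlay_with_pil(img, prediction, colorMap, fullResultPath, alpha=0.4):
        # img: (H, W, 3) uint8 RGB tile, prediction: (H, W) integer labelmap
        base = Image.fromarray(img).convert('RGB')
        overlay = base.copy()
        for i, color in enumerate(colorMap):
            if i == 0:  # class 0 is background (black) and stays untouched
                continue
            # 0/255 uint8 mask so Pillow accepts it as an 8-bit paste mask
            class_mask = Image.fromarray(((prediction == i) * 255).astype(np.uint8), mode='L')
            overlay.paste(Image.new('RGB', base.size, tuple(int(c) for c in color)), mask=class_mask)
        blended = Image.blend(base, overlay, alpha)  # alpha controls overlay opacity
        blended.save(fullResultPath)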
@@ -209,7 +243,10 @@ def savePredictionOverlayResults(img, prediction, fullResultPath, figSize, alpha
     assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!'
     customColorMap = mpl.colors.ListedColormap(colorMap)
 
+
+
     fig = plt.figure(figsize=figSize)
+
     ax = plt.Axes(fig, [0., 0., 1., 1., ])
     ax.set_axis_off()
     fig.add_axes(ax)
@@ -255,6 +292,8 @@ def savePredictionOverlayResults_Fast(img, prediction, fullResultPath, figHeight
     assert prediction.max() < max_number_of_labels, 'Too many labels -> Not enough colors available in custom colormap! Add some colors!'
     customColorMap = mpl.colors.ListedColormap(colorMap)
 
+
+
     fig = plt.figure(figsize=(figHeight*prediction.shape[1]/prediction.shape[0], figHeight))
     ax = plt.Axes(fig, [0., 0., 1., 1., ])
     ax.set_axis_off()
-- 
GitLab