CentOS 5.5 i386 on VMware Fusion 3
Erlang R14B01
pam-devel is required, so install it beforehand.
# yum install pam-devel
# ./configure --prefix=/usr/local --localstatedir=/var --sysconfdir=/etc
# make && make install
1> 4/2.
2.0
2> 4 div 2.
2
1> hello.
hello
2> Hello.
* 1: variable 'Hello' is unbound
3> hello=hello.
hello
4> Hello=hello.
hello
5> Hello.
hello
1> Person={person,{name,calmoka},{age,36}}.
{person,{name,calmoka},{age,36}}
2> {_,{name,Name},_}=Person.
{person,{name,calmoka},{age,36}}
3> Name.
calmoka
1> [99,97,116].
"cat"
2> [H|T]="cat".
"cat"
3> H.
99
4> T.
"at"
1> X=123.
123
2> f().
ok
3> X=234.
234
[F(X) || X <- L].

1> [X * 2 || X <- [1,2,3,4,5]].
[2,4,6,8,10]
f(X) when (X == 0) or (1/X > 2) -> ...
g(X) when (X == 0) orelse (1/X > 2) -> ...

When X = 0, f(X) does not match: `or` evaluates both operands, so 1/X raises badarith and the whole guard fails. `orelse` short-circuits, so g(0) never evaluates 1/X and the clause matches.
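A minimal runnable sketch of the difference (the module name and return atoms are mine):

-module(guard_demo).
-export([f/1, g/1]).

%% `or` evaluates both operands; for X == 0, 1/X fails the guard,
%% so f(0) falls through to the second clause.
f(X) when (X == 0) or (1/X > 2) -> matched;
f(_) -> no_match.

%% `orelse` short-circuits; for X == 0 the right-hand side is never
%% evaluated, so g(0) matches the first clause.
g(X) when (X == 0) orelse (1/X > 2) -> matched;
g(_) -> no_match.

1> guard_demo:f(0).
no_match
2> guard_demo:g(0).
matched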
is_atom(X)
is_binary(X)
is_constant(X)
is_float(X)
is_function(X)
is_function(X, N)     % function with N arguments
is_integer(X)
is_list(X)
is_number(X)
is_pid(X)
is_port(X)
is_reference(X)
is_tuple(X)
is_record(X, Tag)
is_record(X, Tag, N)  % record of type Tag and size N
abs(X)
element(N, X)         % Nth element of tuple X
float(X)
hd(X)                 % head of list X
length(X)
node()                % the current node
node(X)               % the node on which X (a process, reference, or port) was created
round(X)              % round to the nearest integer
self()                % PID of the current process
size(X)
trunc(X)              % truncate X to an integer
tl(X)                 % tail of list X

http://erldocs.com/R14B01/erts/erlang.html?i=0&search=erlang#undefined
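A few of these in the shell:

1> element(2, {a,b,c}).
b
2> hd([1,2,3]).
1
3> tl([1,2,3]).
[2,3]
4> round(2.5).
3
5> trunc(2.7).
2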
%% Record definitions go in a .hrl file.
-record(rec, { key1=default, key2, key3 }).

1> rr("record.hrl").
rec
2> X = #rec{key2=key2}.
#rec{key1 = default,key2 = key2,key3 = undefined}
%% rf() clears the record definition; each variable then shows as a plain tuple.
3> rf(rec).
ok
4> X.
{rec,default,key2,undefined}
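While the definition is loaded (i.e. before the rf/1 above), field access and functional update look like this:

1> rr("record.hrl").
rec
2> Y = #rec{}.
#rec{key1 = default,key2 = undefined,key3 = undefined}
3> Y#rec.key1.
default
4> Y#rec{key2 = 42}.
#rec{key1 = default,key2 = 42,key3 = undefined}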
filter(P, [H|T]) ->
    case P(H) of
        true  -> [H|filter(P, T)];
        false -> filter(P, T)
    end;
filter(P, []) -> [].

case Expression of
    Pattern1 [when Guard1] -> Expr_seq1;
    Pattern2 [when Guard2] -> Expr_seq2
end

%% And while we're at it, the if expression:
if
    Guard1 -> Expr_seq1;
    Guard2 -> Expr_seq2;
    true   -> Expr_seq3
end
odds_and_evens(L) ->
    odds_and_evens(L, [], []).

odds_and_evens([H|T], Odds, Evens) ->
    case (H rem 2) of
        1 -> odds_and_evens(T, [H|Odds], Evens);
        0 -> odds_and_evens(T, Odds, [H|Evens])
    end;
odds_and_evens([], Odds, Evens) ->
    {lists:reverse(Odds), lists:reverse(Evens)}.

1> lib_misc:odds_and_evens([1,2,4,5,6,7,8,89,3]).
{[1,5,7,89,3],[2,4,6,8]}
exit(Why)         - when you want to terminate the process
throw(Why)        - an exception the caller may want to catch
erlang:error(Why) - a crash, equivalent to an internally generated error
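How each looks when trapped with the old-style catch (the error case also carries a stack trace, elided here):

1> catch throw(oops).
oops
2> catch exit(oops).
{'EXIT',oops}
3> catch erlang:error(oops).
{'EXIT',{oops,[...]}}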
case f(X) of
    {ok, Val}    -> ...;
    {error, Why} -> ...
end,
try f(X) of
    Val -> ...
catch
    throw:{ExceptionA, Reason} -> ...;
    throw:{ExceptionB, Reason} -> ...;
    exit:Ex  -> ...;
    error:Ex -> ...;
    _:_ -> ...
end

Note that the catch-all _:_ clause must come last; any clause after it could never match. Normally it's better not to catch exit, though?
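A runnable variant of the skeleton (the function name, return tuples, and module demo are mine):

%% Run F() and report which class of exception it raised, if any.
sniff(F) ->
    try F() of
        Val -> {ok, Val}
    catch
        throw:Why -> {caught_throw, Why};
        exit:Why  -> {caught_exit, Why};
        error:Why -> {caught_error, Why}
    end.

1> demo:sniff(fun() -> throw(oops) end).
{caught_throw,oops}
2> demo:sniff(fun() -> 1/0 end).
{caught_error,badarith}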
1> R = 16#ff.
255
2> G = 16#66.
102
3> B = 16#00.
0
4> RGB = <<R:8, G:8, B:8>>.
<<255,102,0>>
5> <<R1:8, G1:8, B1:8>> = RGB.
<<255,102,0>>
6> R1.
255
7> G1.
102
8> B1.
0
webtool:start().
receive
    Pattern1 [when Guard1] ->
        Expression1;
    Pattern2 [when Guard2] ->
        Expression2;
    ...
after Time ->
    Expression
end.
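With no patterns at all, the after clause alone gives you a sleep function:

%% Wait for T milliseconds, then return.
sleep(T) ->
    receive
    after T ->
        true
    end.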
spawn(fun() -> ... end)

If the spawned process dies, I want to die too:

spawn_link(fun() -> ... end)

I want to be told when the spawned process dies:

process_flag(trap_exit, true),
spawn_link(fun() -> ... end)

loop() ->
    receive
        {'EXIT', Pid, Reason} -> ...
    end
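A minimal sketch of the third form (names are mine): the parent traps exits, links to a child that crashes, and receives the 'EXIT' message instead of dying with it.

start() ->
    process_flag(trap_exit, true),
    %% The child exits immediately with reason crashed.
    Pid = spawn_link(fun() -> exit(crashed) end),
    receive
        {'EXIT', Pid, Reason} ->
            io:format("~p died: ~p~n", [Pid, Reason])
    end.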
-compile(export_all).
Prepend to the load path:

$ erl -pa Dir1 -pa Dir2
> code:add_patha(Dir)

Append to the load path:

$ erl -pz Dir1 -pz Dir2
> code:add_pathz(Dir)
-include_lib("kernel/include/file.hrl").
$ erl -sname node1

(node1@localhost)1> xxx:start().
ok
(node1@localhost)2> rpc:call(node2@localhost, M, F, A).

When the client and server are on different hosts:

$ erl -name node1 -setcookie somecookie

(node1@host1)1> xxx:start().
ok
(node1@host1)2> rpc:call('node2@host2.example.com', M, F, A).

To pin the ports used for distribution (e.g. through a firewall):

$ erl -name ... -setcookie ... -kernel inet_dist_listen_min Min -kernel inet_dist_listen_max Max
spawn('somenode@somehost.example.com', Mod, Fun, Args).
rpc:call(Node, Mod, Fun, Args). etc.
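A quick way to check that the two nodes can actually talk (assuming node2 is up and shares the cookie):

(node1@localhost)1> rpc:call(node2@localhost, erlang, node, []).
node2@localhost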
Str = lists:flatten(io_lib:format("foo ~p ~p", [bar, hoge])).
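The lists:flatten/1 is needed because io_lib:format/2 returns a (possibly deep) character list, not a flat string; the exact nesting varies, but flattening gives:

1> lists:flatten(io_lib:format("foo ~p ~p", [bar, hoge])).
"foo bar hoge"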
$ dialyzer --build_plt --apps erts kernel stdlib crypto compiler hipe
$ dialyzer -Wunmatched_returns -Werror_handling -Wrace_conditions ... -c hoge.erl
$ sudo port selfupdate
$ sudo port clean --dist outdated
$ sudo port upgrade outdated
$ sudo port -u uninstall
<key>CFBundleName</key>
<string>SpaSearch</string>
<key>CFBundleDisplayName</key>
<string>SpaSearch</string>
CFBundleName = "温泉サーチ"
CFBundleDisplayName = "温泉サーチ(暫定版)"
id defaults = [NSUserDefaults standardUserDefaults];
id dic = [defaults dictionaryRepresentation];
id str = [[NSString alloc] initWithFormat:@"Data=%@" locale:dic, [NSDate date]];

Something like that. (Note: `default` is a C keyword, so the variable has to be named something else, e.g. `defaults`.)
NSAssert(condition, NSString *description [, arg, ...]);

NSAssert(x > y, @"Illegal values x(%d) y(%d)", x, y);
gcc hoge.m ... -DNS_BLOCK_ASSERTIONS ...
$ cd /Applications/CotEditor.app/Contents/MacOS/
$ lipo -detailed_info CotEditor
Fat header in: CotEditor
fat_magic 0xcafebabe
nfat_arch 2
architecture ppc
    cputype CPU_TYPE_POWERPC
    cpusubtype CPU_SUBTYPE_POWERPC_ALL
    offset 4096
    size 451032
    align 2^12 (4096)
architecture i386
    cputype CPU_TYPE_I386
    cpusubtype CPU_SUBTYPE_I386_ALL
    offset 458752
    size 457544
    align 2^12 (4096)
$ lipo -remove ppc -output CotEditor2 CotEditor
$ ll -h
total 2696
-rwxr-xr-x 1 user admin 895K 4 26 2009 CotEditor
-rwxr-xr-x 1 user admin 451K 2 16 23:14 CotEditor2
$ lipo -detailed_info CotEditor2
Fat header in: CotEditor2
fat_magic 0xcafebabe
nfat_arch 1
architecture i386
    cputype CPU_TYPE_I386
    cpusubtype CPU_SUBTYPE_I386_ALL
    offset 4096
    size 457544
    align 2^12 (4096)
public class DigitFilterFactory extends BaseTokenFilterFactory {
    public TokenStream create(TokenStream input) {
        return new DigitFilter(input);
    }
}
public class DigitFilter extends TokenFilter {

    boolean preRead;
    String preTerm;
    String preType;
    int preStart;
    int preEnd;

    protected DigitFilter(TokenStream input) {
        super(input);
        preRead = false;
        preTerm = preType = null;
        preStart = preEnd = 0;
    }

    public Token next(Token token) throws IOException {
        // Return the token read ahead on the previous call, if any.
        if (preRead) {
            preRead = false;
            return preTerm == null ? null
                : token.reinit(preTerm, preStart, preEnd, preType);
        }
        Token t = input.next(token);
        if (t == null) return null;
        char[] c; // for termBuffer
        // A single-digit token starts a run: concatenate the following
        // single-digit tokens into one token.
        if (t.termLength() == 1 && Character.isDigit((c = t.termBuffer())[0])) {
            int start = t.startOffset();
            int end = t.endOffset();
            String type = t.type();
            StringBuilder st = new StringBuilder();
            st.append(c[0]);
            while (true) {
                t = input.next(token);
                if (t == null) {
                    preRead = true;
                    preTerm = null;
                    break;
                } else if (t.termLength() != 1
                        || !Character.isDigit((c = t.termBuffer())[0])) {
                    // Not a single digit: stash it for the next call. Read the
                    // buffer from t directly, since c may be stale when the
                    // length test short-circuited the isDigit check.
                    preRead = true;
                    preTerm = new String(t.termBuffer(), 0, t.termLength());
                    preStart = t.startOffset();
                    preEnd = t.endOffset();
                    preType = t.type();
                    break;
                }
                st.append(c[0]);
                end = t.endOffset();
            }
            return token.reinit(st.toString(), start, end, type);
        }
        return t;
    }
}

With this, I was able to put together the following fieldType:
<fieldType name="text_ja" class="solr.TextField">
  <analyzer>
    <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ja.txt" />
    <tokenizer class="SenTokenizerFactory" />
    <filter class="POSFilterFactory" deny="pos-deny.txt" />
    <filter class="DigitFilterFactory" />
    <filter class="solr.LowerCaseFilterFactory" />
    <filter class="KatakanaStemFilterFactory" />
    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords-ja.txt" />
  </analyzer>
</fieldType>
public class POSFilterFactory extends BaseTokenFilterFactory implements ResourceLoaderAware {

    private Set<String> posSet;
    private String denyPOSFile;

    @Override
    public void init(Map<String, String> args) {
        super.init(args);
        // "deny" names the file listing part-of-speech tags to drop
        // (the deny="pos-deny.txt" attribute in schema.xml).
        denyPOSFile = args.get("deny");
    }

    public void inform(ResourceLoader loader) {
        try {
            List<String> alist = loader.getLines(denyPOSFile);
            posSet = POSFilter.makePOSSet(alist);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public TokenStream create(TokenStream input) {
        return new POSFilter(input, posSet);
    }
}
public final class POSFilter extends TokenFilter {

    private final Set<String> posSet;

    public POSFilter(TokenStream input, Set<String> posSet) {
        super(input);
        this.posSet = posSet;
    }

    public final static Set<String> makePOSSet(List<String> posList) {
        if (posList == null)
            throw new NullPointerException("posList is null");
        return new HashSet<String>(posList);
    }

    public final Token next(Token token) throws IOException {
        Token t;
        while (true) {
            t = input.next(token);
            if (t == null) return null;
            // Drop tokens whose part-of-speech (carried in the token type)
            // is in the deny set.
            if (posSet == null || !posSet.contains(t.type())) break;
        }
        return t;
    }
}
<fieldType name="text_ja" class="solr.TextField">
  <analyzer>
    <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ja.txt" />
    <tokenizer class="SenTokenizerFactory" />
    <filter class="POSFilterFactory" deny="pos-deny.txt" />
  </analyzer>
</fieldType>
public class SenTokenizerFactory extends BaseTokenizerFactory {

    private static final Logger log = LoggerFactory.getLogger(SenTokenizerFactory.class);

    static final String PROP_SEN_HOME = "sen.home";
    static final String JNDI_SEN_HOME = "sen/home";
    static final String FS = System.getProperty("file.separator");
    static final String SEN_XML = FS + "conf" + FS + "sen.xml";

    String configFile;
    String compositRule;

    @Override
    public void init(Map<String, String> args) {
        String senHome = null;

        // Try JNDI
        try {
            Context c = new InitialContext();
            senHome = (String) c.lookup("java:comp/env/" + JNDI_SEN_HOME);
            log.info("Using JNDI sen/home: " + senHome);
        } catch (NoInitialContextException e) {
            log.info("JNDI not configured for Solr (NoInitialContextEx)");
        } catch (NamingException e) {
            log.info("No sen/home in JNDI");
        } catch (RuntimeException ex) {
            log.warn("Odd RuntimeException while testing for JNDI: " + ex.getMessage());
        }

        // Now try the system property
        if (senHome == null) {
            senHome = System.getProperty(PROP_SEN_HOME);
            log.info("Using System property sen.home: " + senHome);
        }

        // Fall back to the current directory
        if (senHome == null) {
            senHome = ".";
            log.info("sen.home defaulted to '.' (could not find system property or JNDI)");
        }

        configFile = senHome + SEN_XML;
        log.info("config file for SenTokenizer is " + configFile);
        readConfig();
        log.info("compositRule is: " + (compositRule == null ? "NULL" : compositRule));
    }

    protected String getConfigFile() {
        return configFile;
    }

    private void readConfig() {
        List<String> compositRuleList = new ArrayList<String>();
        try {
            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
            DocumentBuilder builder = factory.newDocumentBuilder();
            Document doc = builder.parse(new InputSource(configFile));
            NodeList nl = doc.getFirstChild().getChildNodes();
            for (int i = 0; i < nl.getLength(); i++) {
                org.w3c.dom.Node n = nl.item(i);
                if (n.getNodeType() == org.w3c.dom.Node.ELEMENT_NODE) {
                    String nn = n.getNodeName();
                    String value = n.getFirstChild().getNodeValue();
                    if (nn.equals("composit")) {
                        compositRuleList.add(value);
                        log.info("add composit rule: " + value);
                    }
                }
            }
            if (compositRuleList.size() > 0) {
                compositRule = StringUtils.join(compositRuleList, "\n");
            }
        } catch (ParserConfigurationException e) {
            throw new IllegalArgumentException(e.getMessage());
        } catch (FileNotFoundException e) {
            throw new IllegalArgumentException(e.getMessage());
        } catch (SAXException e) {
            throw new IllegalArgumentException(e.getMessage());
        } catch (IOException e) {
            throw new IllegalArgumentException(e.getMessage());
        }
    }

    public Tokenizer create(Reader input) {
        try {
            return new SenTokenizer(input, configFile, compositRule);
        } catch (IOException e) {
            throw new RuntimeException("cannot initialize SenTokenizer: " + e.toString());
        }
    }
}
public class SenTokenizer extends Tokenizer {

    private StreamTagger tagger = null;
    private String configFile = null;
    private String compositRule = null;

    // Remembers which composit rule sets have already been registered.
    private static final HashSet<String> hash = new HashSet<String>();

    public SenTokenizer(Reader input, String configFile, String compositRule) throws IOException {
        super(input);
        this.configFile = configFile;
        this.compositRule = compositRule;
        init(input);
    }

    private void init(Reader input) throws IOException {
        tagger = new StreamTagger(input, configFile);
        synchronized (hash) {
            if (compositRule != null && !compositRule.equals("")) {
                if (!hash.contains(compositRule)) {
                    CompositPostProcessor p = new CompositPostProcessor();
                    p.readRules(new BufferedReader(new StringReader(compositRule)));
                    hash.add(compositRule);
                    tagger.addPostProcessor(p);
                }
            }
        }
    }

    public Token next(Token token) throws IOException {
        if (!tagger.hasNext()) return null;
        net.java.sen.Token t = tagger.next();
        if (t == null) return next(token);
        return token.reinit(
                t.getBasicString(),
                correctOffset(t.start()),
                correctOffset(t.end()),
                t.getPos());
    }

    @Override
    public void reset(Reader input) throws IOException {
        super.reset(input);
        init(input);
    }
}
<fieldType name="text_ja" class="solr.TextField">
  <analyzer>
    <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ja.txt" />
    <tokenizer class="mydomain.SenTokenizerFactory" />
  </analyzer>
</fieldType>
<fieldType name="text_ja" class="solr.TextField">
  <analyzer class="org.apache.lucene.analysis.ja.JapaneseAnalyzer" />
</fieldType>