
Commit ec281eb: add readme

APTX-4869-MDZZ committed Nov 1, 2017 (1 parent: 318cdab)

Showing 12 changed files with 188 additions and 175 deletions.
1 change: 0 additions & 1 deletion 1.json

This file was deleted.

1 change: 0 additions & 1 deletion 1.txt

This file was deleted.

47 changes: 14 additions & 33 deletions README.md
@@ -2,10 +2,11 @@

## System Introduction

For biologists, finding suitable BioBricks and collecting useful genetic information from large volumes of literature is quite frustrating. With BioDesigner Coral, biologists can search BioBricks, design with the help of recommendations, and obtain genetic information about BioBricks in a more comprehensive way. The system analyzes a user's design and then recommends parts they may need. Through analysis of massive amounts of literature, it extracts useful information about genes, BioBricks, and the relations between genes. All genetic information is exhibited in a network graph to help users understand and use it better. By clicking on the nodes in the network, which represent genes or BioBricks, users can obtain reasonably accurate information about related genes, corresponding proteins, and relevant literature. We hope BioDesigner Coral can relieve arduous lab work and inspire synthetic biologists.

## Structure of the project

+ `biosearch`: the wiki search function.
+ `accounts`: user information management, such as registration and login.
+ `geneRelationship`: gene-related functions.
+ `projectManage`: project-related functions, such as creating a new project, deleting a project, and creating a new device; a sketch of how these apps might be wired together follows the list.
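
To make that layout concrete, here is a minimal sketch of a root URLconf mounting the four apps, in the Django 1.x style this 2017 project would have used. The URL prefixes and the existence of per-app `urls.py` modules are assumptions for illustration, not taken from the repository.

    # Hypothetical root urls.py; prefixes and per-app urls.py modules are assumed.
    from django.conf.urls import include, url

    urlpatterns = [
        url(r'^biosearch/', include('biosearch.urls')),    # wiki search
        url(r'^accounts/', include('accounts.urls')),      # registration / login
        url(r'^gene/', include('geneRelationship.urls')),  # gene functions
        url(r'^project/', include('projectManage.urls')),  # project management
    ]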
@@ -30,42 +31,22 @@
+ Pillow
+ pymongo

Removed (the old manual setup instructions):

## Install

Django install:

    pip install Django==$DJANGO_VERSION

Mysql-python install:

    pip install MySQL-python

Elasticsearch install & run:

    wget https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.2.zip
    unzip elasticsearch-1.7.2.zip
    ./elasticsearch-1.7.2/bin/elasticsearch -d

or

    pip install elasticsearch

Pillow:

    pip install pillow

pymongo:

    pip install pymongo

Database import:

    mysql -e 'CREATE DATABASE biodesigner'
    python manage.py syncdb --noinput
    mysql -e 'source xxx.sql' -u username --password=password biodesigner;

> the sql source file can be downloaded from GitHub

Run server:

    python manage.py runserver

Added (the new docker-based setup instructions):

## Install docker

+ Please make sure that you have installed docker and run it first.
+ $ sudo apt-get update
+ $ sudo apt-get install docker

## Install docker-compose

+ $ sudo curl -L https://github.com/docker/compose/releases/download/1.16.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
+ $ sudo chmod +x /usr/local/bin/docker-compose

## Run a new container

+ $ git pull (the url of docker)
+ $ cd docker
+ $ sudo docker-compose up --build

### Enter http://127.0.0.1 in a browser to see the application running.
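
`sudo docker-compose up --build` implies a `docker/docker-compose.yml` in the repository. As a rough sketch only, assuming the stack from the dependency list above (Django, MySQL, Elasticsearch, MongoDB), such a file might look like this; every service name, image tag, password, and port below is an assumption, not the repository's actual configuration.

    # Hypothetical docker-compose.yml; all values are placeholders.
    version: '3'
    services:
      web:
        build: .
        command: python manage.py runserver 0.0.0.0:80
        ports:
          - "80:80"            # so http://127.0.0.1 reaches the app
        depends_on:
          - db
          - es
          - mongo
      db:
        image: mysql:5.7
        environment:
          MYSQL_DATABASE: biodesigner      # database name from the old instructions
          MYSQL_ROOT_PASSWORD: changeme    # placeholder
      es:
        image: elasticsearch:5.6
      mongo:
        image: mongo:3.4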
58 changes: 0 additions & 58 deletions addId.py

This file was deleted.

53 changes: 0 additions & 53 deletions transportDataToES.py

This file was deleted.

42 changes: 42 additions & 0 deletions utils/import tool/addId.py
@@ -0,0 +1,42 @@
import MySQLdb

# Connection to the biodesigner database (source of the parts).
connect = MySQLdb.connect(
    host='localhost',
    port=3306,
    user='root',
    passwd='qaz123',
    db='biodesignver'
)
cur = connect.cursor()
cur2 = connect.cursor()

# Connection to the wiki database (source of the team ids).
connect1 = MySQLdb.connect(
    host='localhost',
    port=3306,
    user='root',
    passwd='qaz123',
    db='wiki'
)
cur1 = connect1.cursor()

# For every part, translate its comma-separated "<year>_<team name>" list
# into the matching team ids and store them back on the part row.
cur.execute("select * from simpepart")
part = cur.fetchone()
while part:
    teams = part[6]
    if teams != "":
        ids = list()
        for team in teams.split(','):
            print(team)
            # "2017_Some_Team" -> year 2017, name "Some_Team"
            position = team.find('_')
            year = int(team[0:position])
            name = team[position + 1:]
            # Parameterized query, so the driver quotes team names safely.
            cur1.execute("select * from team where year=%s and team_name=%s", (year, name))
            row = cur1.fetchone()
            if row:
                print(row[18])        # column 18 holds the team's _id
                ids.append(row[18])
        teamId = ','.join(ids)
        cur2.execute("update simpepart set teamId=%s where part_id=%s", (teamId, part[0]))
    part = cur.fetchone()
connect.commit()
58 changes: 29 additions & 29 deletions utils/getId.py → utils/import tool/getId.py
@@ -1,29 +1,29 @@
from elasticsearch import Elasticsearch
import MySQLdb

connect = MySQLdb.connect(
    host='localhost',
    port=3306,
    user='root',
-   passwd='',
+   passwd='qaz123',
    db='wiki'
)
cur = connect.cursor()

# Pull up to 5000 team wiki documents out of Elasticsearch in one query.
es = Elasticsearch()
query = {
    "size": 5000,
    "query": {
        "match_all": {}
    }
}

_searched = es.search(index='team_wiki', doc_type='wiki', body=query)
teams = _searched['hits']['hits']
for team in teams:
    id = team["_id"]
    team = team["_source"]
    team_name = team["team_name"]
    year = team["year"]
    print(year, team_name)
    # Store the Elasticsearch document id back on the matching MySQL row.
    cur.execute("update team set _id=%s where year=%s and team_name=%s", (id, int(year), team_name))
connect.commit()
103 changes: 103 additions & 0 deletions utils/import tool/searchDataToES.py
@@ -0,0 +1,103 @@
# -*- coding:utf-8 -*-
# Copy team wiki data from MySQL into Elasticsearch.
from elasticsearch import Elasticsearch

import pymysql

# Open the database connection.
db = pymysql.connect("localhost", "root", "qaz123", "wiki")

# Get a cursor with the cursor() method.
cursor = db.cursor()

# The SQL query: select every team row.
sql = "SELECT * FROM team"

def buildIndexAndMapping(_es):
    _index_mappings = {
        "mappings": {
            "wiki": {
                "properties": {
                    "year": {"type": "keyword"},
                    "team_name": {"type": "text"},
                    "background": {"type": "text"},
                    "description": {"type": "text"},
                    "design": {"type": "text"},
                    "human_practice": {"type": "text"},
                    "modeling": {"type": "text"},
                    "notebook": {"type": "text"},
                    "protocol": {"type": "text"},
                    "result": {"type": "text"},
                    "track": {"type": "keyword"},
                    "recommend": {"type": "text"},
                    "recommendWords": {"type": "text"},
                    "biobrick": {"type": "text"},
                    "keywords": {"type": "text"},
                    "theme": {"type": "keyword"},
                    "awards": {"type": "text"},
                    "type": {"type": "text"},
                    "medal": {"type": "text"},
                    "calScore": {"type": "integer"},
                    "link": {"type": "text"}
                }
            },
        }
    }
    # Create the index only if it does not exist yet.
    if _es.indices.exists(index='team_wiki') is not True:
        _es.indices.create(index='team_wiki', body=_index_mappings)

def writeData(_es):
    # The data collected here was originally for testing.
    user_cursor = []
    try:
        # Execute the SQL statement and fetch every row as one document.
        cursor.execute(sql)
        results = cursor.fetchall()
        for row in results:
            tmp = {
                "year": str(row[0]),
                "team_name": str(row[1]),
                "background": str(row[2]),
                "description": str(row[3]),
                "design": str(row[4]),
                "human_practice": str(row[5]),
                "modeling": str(row[6]),
                "notebook": str(row[7]),
                "protocol": str(row[8]),
                "result": str(row[9]),
                "track": str(row[10]),
                "recommend": str(row[11]),
                "recommendWords": str(row[12]),
                "biobrick": str(row[13]),
                "keywords": str(row[14]),
                "theme": str(row[15]),
                "awards": str(row[16]),
                "type": str(row[17]),
                "medal": str(row[18]),
                "calScore": int(row[19]),  # the mapping declares calScore as integer
                "link": str(row[20])
            }
            user_cursor.append(tmp)
    except Exception as e:
        print("Error: unable to fetch data", e)
    processed = 0
    for _doc in user_cursor:
        try:
            _es.index(index='team_wiki', doc_type='wiki', refresh=True, body=_doc)
            processed += 1
            print('Processed: ' + str(processed), flush=True)
        except Exception as e:
            print(e)


def mainFunc():
    print("copy data from MySQL to Elasticsearch")
    es = Elasticsearch()
    buildIndexAndMapping(es)
    print("index and mapping ready")
    writeData(es)


if __name__ == '__main__':
    mainFunc()
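
Once searchDataToES.py has populated team_wiki, the wiki search feature can query it. Here is a minimal sketch using the same client calls as the scripts above; the choice of fields and the search term are illustrative, not the project's actual query.

    # Hypothetical full-text query against the team_wiki index built above.
    from elasticsearch import Elasticsearch

    es = Elasticsearch()
    query = {
        "size": 10,
        "query": {
            "multi_match": {
                "query": "biosensor",  # illustrative search term
                "fields": ["description", "design", "keywords"]
            }
        }
    }
    hits = es.search(index='team_wiki', doc_type='wiki', body=query)['hits']['hits']
    for hit in hits:
        print(hit['_source']['year'], hit['_source']['team_name'])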
4 files renamed without changes.
