When I try to access 'tareas', the application returns the following:
'Tareas' object has no attribute '__fields__'
Request Method: GET
Request URL: http://localhost:8000/tareas/
Django Version: 1.4
Exception Type: AttributeError
Exception Value:
'Tareas' object has no attribute '__fields__'
Exception Location: /Users/Tone/Documents/Proyectos/macrotelecom/common/generic.py in view_list, line 240
The problem is in this line:
if not fields:
    fields=queryset.__dict__['model']().__fields__(profile)
I find this strange, because in admin.py I have already set the fields attribute for my 'Tareas' model:
from tareas.models import Tareas
from django.contrib import admin

class TareasAdmin(admin.ModelAdmin):
    fields = ['idtarea','idtipotarea','idagente','fechatarea']
    list_display = ('idtarea','idagente','fechatarea')

admin.site.register(Tareas,TareasAdmin)
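As far as I can tell, though, the failing line never looks at ModelAdmin.fields: it instantiates the model class itself and calls __fields__(profile) on the instance. Judging by how view_list later iterates over fields (value[0] as the field name, value[1] as the public label, plus optional size and align entries), the model itself would apparently need a method along these lines. This is only a sketch; the exact return format is my guess from that loop:

from django.db import models

class Tareas(models.Model):
    # ... existing model fields ...

    def __fields__(self, profile):
        # Guessed format: (field name, public label[, size[, align]]) tuples,
        # matching how view_list reads value[0], value[1], value[2], value[3].
        return [
            ('idtarea', 'Tarea'),
            ('idagente', 'Agente'),
            ('fechatarea', 'Fecha'),
        ]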
That line lives in this function, view_list:
def view_list(request, get_template, **kwargs):
    '''
    Generic list view with validation included and object transfering support
    '''
    # Config
    default_rows_per_page=100
    # Get arguments from the function
    object_id=keyarg('object_id',kwargs,None)
    action=keyarg('action',kwargs,None)
    extra_context=keyarg('extra_context',kwargs,{})
    queryset=keyarg('queryset',kwargs,None)
    restrictions=keyarg('restrictions',kwargs,None)
    permission=keyarg('permission',kwargs,None)
    fields=keyarg('fields',kwargs,None)
    default_ordering=keyarg('default_ordering',kwargs,None)
    compact_rows=keyarg('compact_rows',kwargs,None)
    # Get template and profile
    namesp=str(queryset.__dict__['model']).replace("class ","").replace(">","").replace("<","").replace("'","").split(".")
    appname=namesp[-3].lower()
    modelname=namesp[-1].lower()
    (profile,template_name)=get_template(keyarg('template_name',kwargs,"%s/%s_list.html" % (appname,modelname)))
    # Check permissions
    if (permission is not None) and (not request.user.has_perm(permission)):
        return HttpResponseRedirect('/not_authorized/')
    # Get extra arguments
    extra={}
    for arg in kwargs:
        if arg not in ['object_id','action','template_name','extra_context','queryset','restrictions']:
            extra[arg]=kwargs[arg]
    # Inicialization
    new_extra_context={}
    new_extra_context['now']=epochdate(time.time())
    # new_extra_context['msglog']=msglog()
    # Restrictions fields
    new_extra_context['filters']=[]
    if restrictions:
        for restriction in restrictions:
            f={}
            f['name']=restriction
            f['value']=extra[restriction]
            new_extra_context['filters'].append(f)
    # Process the filter
    new_extra_context['filters_obj']={}
    new_extra_context['header_loop']=1
    if restrictions:
        queryset_obj={}
        for rname in restrictions:
            # Get name of the field and object
            (rfield,robject)=restrictions[rname]
            # Get the ID
            rid=extra[rname]
            # Save the id in extra_context
            new_extra_context[rname]=rid
            # Save the object in queryset_obj
            queryset_obj[rname]=robject(id=rid)
            # Filter the queryset
            queryset=queryset.filter(eval("Q(%s=queryset_obj['%s'])" % (rfield,rname)))
            new_extra_context['filters_obj'][rname]=get_object_or_404(robject,pk=rid)
    # Get field list
    if not fields:
        fields=queryset.__dict__['model']().__fields__(profile)
    # Save action if we got one
    if action:
        new_extra_context['action']=action
    # Try to convert object_id to a numeric id
    try:
        object_id=int(object_id)
    except:
        pass
    # Save GET values
    new_extra_context['get']=[]
    new_extra_context['getval']={}
    for name in request.GET:
        if name not in ['filtername','filtervalue']:
            struct={}
            struct['name']=name
            if name=='rowsperpage':
                struct['value']=default_rows_per_page
            elif name=='page':
                struct['value']=1
            else:
                struct['value']=request.GET[name]
            new_extra_context['get'].append(struct)
            new_extra_context['getval'][name]=struct['value']
    # Filter on limits
    limits=queryset.__dict__['model']().__limitQ__(profile,request)
    qobjects=None
    for name in limits:
        if qobjects:
            qobjects&=limits[name]
        else:
            qobjects=limits[name]
    if qobjects:
        queryset=queryset.filter(qobjects)
    # Filters on fields
    try:
        filters_by_json=request.GET.get('filters','{}')
        filters_by_struct=json_decode(str(filters_by_json))
    except Exception:
        filters_by_struct=[]
    filtername=request.GET.get('filtername',None)
    filtervalue=request.GET.get('filtervalue',None)
    listfilters=queryset.__dict__['model']().__searchF__(profile)
    # Process the search
    filters_struct={}
    for key in filters_by_struct:
        # Get the value of the original filter
        value=filters_by_struct[key]
        # If there is something to filter, filter is not being changed and filter is known by the class
        if (key!=filtername) and (key in listfilters) and (value>0):
            # Add the filter to the queryset
            f=listfilters[key]
            fv=f[2][value-1][0]
            queryset=queryset.filter(f[1](fv))
            # Save it in the struct as a valid filter
            filters_struct[key]=value
    # Add the requested filter if any
    if (filtername in listfilters) and (int(filtervalue)>0):
        f=listfilters[filtername]
        fv=f[2][int(filtervalue)-1][0]
        queryset=queryset.filter(f[1](fv))
        filters_struct[filtername]=int(filtervalue)
    # Rewrite filters_json updated
    filters_json=json_encode(filters_struct)
    # Build the clean get for filters
    get=new_extra_context['get']
    filters_get=[]
    for element in get:
        if element['name'] not in ['filters']:
            struct={}
            struct['name']=element['name']
            struct['value']=element['value']
            filters_get.append(struct)
    # Add filter_json
    struct={}
    struct['name']='filters'
    struct['value']=filters_json
    filters_get.append(struct)
    new_extra_context['filters_get']=filters_get
    # Get the list of filters allowed by this class
    filters=[]
    for key in listfilters:
        choice=[_('All')]
        for value in listfilters[key][2]:
            choice.append(value[1])
        # Decide the choosen field
        if key in filters_struct.keys():
            choose=int(filters_struct[key])
        else:
            choose=0
        filters.append((key,listfilters[key][0],choice,choose))
    new_extra_context['filters']=filters
    # Search text in all fields
    search=request.GET.get('search','')
    new_extra_context['search']=search
    datetimeQ=None
    if len(search)>0:
        searchs=queryset.__dict__['model']().__searchQ__(search,profile)
        qobjects=None
        for name in searchs:
            if (searchs[name]=='datetime'):
                datetimeQ=name
                continue
            else:
                if qobjects:
                    qobjects|=searchs[name]
                else:
                    qobjects=searchs[name]
        if qobjects:
            queryset=queryset.filter(qobjects)
    else:
        # Look for datetimeQ field
        searchs=queryset.__dict__['model']().__searchQ__(search,profile)
        for name in searchs:
            if (searchs[name]=='datetime'):
                datetimeQ=name
                continue
    # Datetime Q
    new_extra_context['datetimeQ']=datetimeQ
    if datetimeQ:
        # Inicialization
        f={}
        f['year']=(1900,2100,False)
        f['month']=(1,12,False)
        f['day']=(1,31,False)
        f['hour']=(0,23,False)
        f['minute']=(0,59,False)
        f['second']=(0,59,False)
        date_elements=[None,'year','month','day','hour','minute','second']
        # Get configuration of dates and set limits to the queryset
        for element in date_elements[1:]:
            value=request.GET.get(element,None)
            if value:
                f[element]=(int(value),int(value),True)
        if f['year'][2] and f['month'][2] and not f['day'][2]:
            (g,lastday)=calendar.monthrange(f['year'][1],f['month'][1])
            f['day']=(f['day'][0],lastday,f['day'][2])
        # Limits
        date_min=datetime.datetime(f['year'][0], f['month'][0], f['day'][0], f['hour'][0], f['minute'][0], f['second'][0])
        date_max=datetime.datetime(f['year'][1], f['month'][1], f['day'][1], f['hour'][1], f['minute'][1], f['second'][1])
        queryset=queryset.filter(eval("( Q(%s__gte=date_min) & Q(%s__lte=date_max) ) | Q(%s=None)" % (datetimeQ,datetimeQ,datetimeQ)))
        # Find actual deepness
        deepness_index=0
        for element in date_elements[1:]:
            if f[element][2]:
                deepness_index+=1
            else:
                break
        # Get results from dates to set the new order
        date_results=queryset.values_list(datetimeQ, flat=True) #.dates(datetimeQ,'day')
        if f['day'][0]!=f['day'][1]:
            if f['month'][0]==f['month'][1]:
                date_results=date_results.dates(datetimeQ,'day')
            elif f['year'][0]==f['year'][1]:
                date_results=date_results.dates(datetimeQ,'month')
            else:
                date_results=date_results.dates(datetimeQ,'year')
        get=new_extra_context['get']
        new_extra_context['datefilter']={}
        # Save the deepness
        if (deepness_index+1==len(date_elements)):
            new_extra_context['datefilter']['deepness']=None
        else:
            new_extra_context['datefilter']['deepness']=date_elements[deepness_index+1]
        new_extra_context['datefilter']['deepnessback']=[]
        new_extra_context['datefilter']['deepnessinit']=[]
        for element in get:
            if (not element['name'] in date_elements):
                struct={}
                struct['name']=element['name']
                struct['value']=element['value']
                new_extra_context['datefilter']['deepnessinit'].append(struct)
                new_extra_context['datefilter']['deepnessback'].append(struct)
            elif (element['name']!=date_elements[deepness_index] and f[element['name']][2]):
                struct={}
                struct['name']=element['name']
                struct['value']=element['value']
                new_extra_context['datefilter']['deepnessback'].append(struct)
        # Build the list of elements
        new_extra_context['datefilter']['data']=[]
        for element in date_results:
            # Save the data
            new_extra_context['datefilter']['data'].append(element.timetuple()[deepness_index])
        new_extra_context['datefilter']['data']=list(set(new_extra_context['datefilter']['data']))
        new_extra_context['datefilter']['data'].sort()
        # Prepare the rightnow result
        if f['month'][2]:
            month=_(month_name(f['month'][0]))
        else:
            month='__'
        if f['hour'][2]:
            rightnow="%s/%s/%s %s:%s:%s" % (grv(f,'day'),month,grv(f,'year'),grv(f,'hour'),grv(f,'minute'),grv(f,'second'))
        else:
            rightnow="%s/%s/%s" % (grv(f,'day'),month,grv(f,'year'))
        new_extra_context['datefilter']['rightnow']=rightnow
    # Distinct
    queryset=queryset.distinct()
    # Ordering field autofill
    try:
        order_by_json=request.GET.get('ordering','[]')
        order_by_struct=json_decode(str(order_by_json))
    except Exception:
        order_by_struct=[]
    order_by=[]
    position={}
    counter=1
    for order in order_by_struct:
        name=order.keys()[0]
        direction=order[name]
        if direction=='asc':
            order_by.append("%s" % (name))
        elif direction=='desc':
            order_by.append("-%s" % (name))
        position[name]=counter
        counter+=1
    if order_by:
        queryset=queryset.order_by(*order_by)
    elif default_ordering:
        queryset=queryset.order_by(default_ordering)
    else:
        queryset=queryset.order_by("pk")
    # Check the total count of registers
    total_registers=queryset.count()
    # Ordering field autofill
    sort={}
    for value in fields:
        # Get values
        name=value[0]
        publicname=value[1]
        if len(value)>2:
            size=value[2]
        else:
            size=None
        if len(value)>3:
            align=value[3]
        else:
            align=None
        # Process ordering
        ordering=[]
        found=False
        for order in order_by_struct:
            subname=order.keys()[0]
            direction=order[subname]
            if name==subname:
                if direction == 'desc':
                    direction = ''
                    sort_class='headerSortUp'
                elif direction == 'asc':
                    direction = 'desc'
                    sort_class='headerSortDown'
                else:
                    sort_class=''
                    direction = 'asc'
                found=True
            if direction == 'asc' or direction=='desc':
                ordering.append({subname:direction})
        if not found:
            ordering.append({name:'asc'})
            sort_class=''
        # Save the ordering method
        sort[name]={}
        sort[name]['id']=name
        sort[name]['name']=publicname
        sort[name]['class']=sort_class
        sort[name]['size']=size
        sort[name]['align']=align
        if name:
            sort[name]['ordering']=json_encode(ordering).replace('"','\\"')
        if name in position:
            sort[name]['position']=position[name]
    # Pagination
    # IMPORTANT: This part is commented because I don't manage to control rowsperpage from urls.py file, it is remembering last query instead
    # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
    #if 'rowsperpage' in extra_context:
    #    rowsperpage=extra_context['rowsperpage']
    #else:
    #    rowsperpage=default_rows_per_page
    #total_rows_per_page=request.GET.get('rowsperpage',rowsperpage)
    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    total_rows_per_page=request.GET.get('rowsperpage',default_rows_per_page)
    if total_rows_per_page:
        try:
            total_rows_per_page = int(total_rows_per_page)
        except Exception:
            total_rows_per_page = 'All'
    else:
        # IMPORTANT: Commented as before coded
        # total_rows_per_page = rowsperpage
        total_rows_per_page = default_rows_per_page
    if total_rows_per_page == 'All':
        page_number=1
        total_rows_per_page = total_registers
        total_rows_per_page_out = _('All')
        total_pages=1
    else:
        total_rows_per_page = int(total_rows_per_page) # By default 10 rows per page
        total_rows_per_page_out = total_rows_per_page
        total_pages=total_registers/total_rows_per_page
        if total_registers%total_rows_per_page:
            total_pages+=1
        page_number=request.GET.get('page',1) # If no page specified use first page
        if page_number=='last':
            page_number=total_pages
        else:
            try:
                page_number=int(page_number)
            except:
                page_number=1
            if page_number>total_pages:
                page_number=total_pages
    # Build the list of page counters allowed
    choice=[]
    c=default_rows_per_page
    chk=1
    while total_registers>=c:
        choice.append(c)
        if chk==1:
            # From 5 to 10
            c=c*2
            # Next level
            chk=2
        elif chk==2:
            # From 10 to 25 (10*2+10/2)
            c=c*2+c/2
            # Next level
            chk=3
        elif chk==3:
            # From 25 to 50
            c*=2
            chk=1
    # Add all choice in any case
    choice.append(_('All'))
    # Save the pagination in the structure
    new_extra_context['rowsperpageallowed']=choice
    new_extra_context['rowsperpage']=total_rows_per_page_out
    new_extra_context['pagenumber']=page_number
    if type(object_id)==type(u'abc'):
        # If object_id is a string, we have a name not an object
        new_extra_context['object_name']=object_id
        object_obj = None
    else:
        # If is not an string
        if object_id:
            # If we got one, load the object
            obj=extra_context['obj']
            object_obj = get_object_or_404(obj, pk=object_id)
        else:
            # There is no object
            object_obj = None
    new_extra_context['object_obj']=object_obj
    # Build the columns structure
    new_extra_context['columns']=[]
    for value in fields:
        field=value[0]
        new_extra_context['columns'].append(sort[field])
    # Get the full number of registers and save it to extra_context
    new_extra_context['total_registers']=total_registers
    if total_rows_per_page=='All':
        # Remove total_rows_per_page if is all
        total_rows_per_page=None
        new_extra_context['page_before']=None
        new_extra_context['page_after']=None
        new_extra_context['start_register']=1
        new_extra_context['showing_registers']=total_registers
    else:
        # Page before
        if page_number<=1:
            new_extra_context['page_before']=None
        else:
            new_extra_context['page_before']=page_number-1
        # Page after
        if page_number>=total_pages:
            new_extra_context['page_after']=None
        else:
            new_extra_context['page_after']=page_number+1
        # Starting on register number
        new_extra_context['start_register']=(page_number-1)*total_rows_per_page+1
        new_extra_context['showing_registers']=total_rows_per_page
    # Calculate end
    new_extra_context['end_register']=min(new_extra_context['start_register']+new_extra_context['showing_registers']-1,total_registers)
    # If compact rows
    hide_head=[]
    hide_tail=[]
    hide_subhead=[]
    hide_subtail=[]
    if compact_rows:
        (compact_field,compact_subelements)=compact_rows
        lastvalue=None
        lastrow=None
        total_subelements=0
        for row in queryset:
            value=eval("row.%s" % (compact_field))
            # Count the subelements from this row
            if compact_subelements:
                count_subelements=eval("row.%s.count()" % (compact_subelements))
            else:
                count_subelements=1
            # If the new row belongs to the same group as the row before
            if value==lastvalue:
                # Hide the head from this row
                hide_head.append(row.id)
                # Hide the tail from the last row
                hide_tail.append(lastrow.id)
                # If there were elements in the group already (somebody opened the subhead, so hide the head of the subgroup) or this row has no elements (no need to open this subhead)
                if total_subelements>0 or count_subelements==0:
                    # Hide the subhead
                    hide_subhead.append(row.id)
                    # Hide the tail of the last row, since we want to connect both groups
                    hide_subtail.append(lastrow.id)
                # Add the total count of elements
                total_subelements+=count_subelements
            # This row doesn't belong to the opened group
            else:
                # If there was some row already and there are no elements in the group (nobody opened the group, so we don't have to close it either)
                if lastrow and total_subelements==0:
                    # Hide the tail from the group
                    hide_subtail.append(lastrow.id)
                # Start a new count of elements (reset the total count of subelements)
                total_subelements=0
                total_subelements+=count_subelements
                # If the new group doesn't have elements (we don't even try to open the group)
                if total_subelements==0:
                    # Hide the head from this group
                    hide_subhead.append(row.id)
            # Remember
            lastvalue=value
            lastrow=row
        # Properly close the group after the loop if there was some row opened
        if lastrow and total_subelements==0:
            # Hide the tail from the group if there was no element in the group (nobody opened the group)
            hide_subtail.append(lastrow.id)
    # Save it in the public structure
    new_extra_context['hide_head']=hide_head
    new_extra_context['hide_tail']=hide_tail
    new_extra_context['hide_subhead']=hide_subhead
    new_extra_context['hide_subtail']=hide_subtail
    # Save extra context
    extra_context.update(new_extra_context)
    # Empty results are empty
    if page_number==0:
        total_rows_per_page=0
    # Return results
    return object_list(request, queryset=queryset, template_name=template_name, extra_context=extra_context, paginate_by=total_rows_per_page, page=page_number)
And this is how it is called from the urls.py for 'Tareas':
from django.conf.urls import patterns, include, url
from tareas.models import Tareacobro, Tipotarea, Agentes, Perfil, Tareas
from django.conf import settings
from common.generic import view_list

# Uncomment the next two lines to enable the admin:
info_tasks = {'queryset': Tareas.objects.all()}

urlpatterns = patterns('tareas.views',
    # =====TASKS======
    # url(r'^$','tareas'),
    (r'^', view_list, dict( info_tasks, extra_context={'obj':Tareas} ),'admin/tareas/tareas'),
    # (r'^$',view_list, dict(info_tasks),'admin/tareas/tareas'),
    #url(r'^$',view_list, dict(info_tasks, extra_context={'obj':Tareas} ),'tareas'),
)
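Looking through view_list above, it also calls other instance methods on the model besides __fields__: model().__limitQ__(profile,request) at the "Filter on limits" step, model().__searchF__(profile) for the per-field filters, and model().__searchQ__(search,profile) for the text search. So the queryset's model apparently has to implement that whole informal interface, not just __fields__. A minimal do-nothing skeleton of those hooks, purely as my assumption about their contracts based on how their return values are used above, would be something like:

from django.db import models
from django.db.models import Q

class Tareas(models.Model):
    # ... same Tareas model as above, existing fields omitted ...

    def __limitQ__(self, profile, request):
        # view_list ANDs the Q objects in this dict together; an empty dict
        # would mean no extra limits (assumed contract).
        return {}

    def __searchQ__(self, search, profile):
        # view_list ORs the Q objects in this dict together; a value equal to
        # the string 'datetime' marks the field used by the date filter
        # (assumed contract), e.g. {'fechatarea': 'datetime'}.
        return {}

    def __searchF__(self, profile):
        # view_list reads entries as (label, Q-builder, choices); an empty
        # dict would mean no per-field filters (assumed contract).
        return {}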